Failed to load Mistral model in AutoGen framework (0.4 and above) - need help solving this issue

from transformers import AutoModelForCausalLM, AutoTokenizer
from autogen_agentchat.agents import AssistantAgent
import torch


model_id = "/home/mony/ai_projects/ai-agent/mistralaiMistral-7B-Instruct-v0.3"


# Note: from_pretrained returns a raw transformers model, not an AutoGen ChatCompletionClient
model_client = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)


assistant = AssistantAgent(name="assistant", model_client=model_client)


result = await assistant.run(task = "what's the captial of india?")
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[9], line 1
----> 1 result = await assistant.run(task = "what's the captial of india?")

File ~/ai_projects/ai-agent/venv/lib/python3.12/site-packages/autogen_agentchat/agents/_base_chat_agent.py:149, in BaseChatAgent.run(self, task, cancellation_token, output_task_messages)
    147         else:
    148             raise ValueError(f"Invalid message type in sequence: {type(msg)}")
--> 149 response = await self.on_messages(input_messages, cancellation_token)
    150 if response.inner_messages is not None:
    151     output_messages += response.inner_messages

File ~/ai_projects/ai-agent/venv/lib/python3.12/site-packages/autogen_agentchat/agents/_assistant_agent.py:896, in AssistantAgent.on_messages(self, messages, cancellation_token)
    882 async def on_messages(
    883     self,
    884     messages: Sequence[BaseChatMessage],
    885     cancellation_token: CancellationToken,
    886 ) -> Response:
    887     """Process incoming messages and generate a response.
    888 
    889     Args:
   (...)    894         Response containing the agent's reply
    895     """
--> 896     async for message in self.on_messages_stream(messages, cancellation_token):
    897         if isinstance(message, Response):
    898             return message

File ~/ai_projects/ai-agent/venv/lib/python3.12/site-packages/autogen_agentchat/agents/_assistant_agent.py:953, in AssistantAgent.on_messages_stream(self, messages, cancellation_token)
    951 # STEP 4: Run the first inference
    952 model_result = None
--> 953 async for inference_output in self._call_llm(
    954     model_client=model_client,
    955     model_client_stream=model_client_stream,
    956     system_messages=system_messages,
    957     model_context=model_context,
    958     workbench=workbench,
    959     handoff_tools=handoff_tools,
    960     agent_name=agent_name,
    961     cancellation_token=cancellation_token,
    962     output_content_type=output_content_type,
    963     message_id=message_id,
    964 ):
    965     if isinstance(inference_output, CreateResult):
    966         model_result = inference_output

File ~/ai_projects/ai-agent/venv/lib/python3.12/site-packages/autogen_agentchat/agents/_assistant_agent.py:1084, in AssistantAgent._call_llm(cls, model_client, model_client_stream, system_messages, model_context, workbench, handoff_tools, agent_name, cancellation_token, output_content_type, message_id)
   1067 """Call the language model with given context and configuration.
   1068 
   1069 Args:
   (...)   1081     Generator yielding model results or streaming chunks
   1082 """
   1083 all_messages = await model_context.get_messages()
-> 1084 llm_messages = cls._get_compatible_context(model_client=model_client, messages=system_messages + all_messages)
   1086 tools = [tool for wb in workbench for tool in await wb.list_tools()] + handoff_tools
   1088 if model_client_stream:

File ~/ai_projects/ai-agent/venv/lib/python3.12/site-packages/autogen_agentchat/agents/_assistant_agent.py:1640, in AssistantAgent._get_compatible_context(model_client, messages)
   1637 @staticmethod
   1638 def _get_compatible_context(model_client: ChatCompletionClient, messages: List[LLMMessage]) -> Sequence[LLMMessage]:
   1639     """Ensure that the messages are compatible with the underlying client, by removing images if needed."""
-> 1640     if model_client.model_info["vision"]:
   1641         return messages
   1642     else:

File ~/ai_projects/ai-agent/venv/lib/python3.12/site-packages/torch/nn/modules/module.py:1940, in Module.__getattr__(self, name)
   1938     if name in modules:
   1939         return modules[name]
-> 1940 raise AttributeError(
   1941     f"'{type(self).__name__}' object has no attribute '{name}'"
   1942 )

AttributeError: 'MistralForCausalLM' object has no attribute 'model_info'

I’m not familiar with AutoGen, but it seems a Hugging Face Transformers model can’t be passed in as-is: AssistantAgent expects a ChatCompletionClient, and the traceback shows it accessing model_client.model_info, which a raw MistralForCausalLM doesn’t have. For local models it seems easier to use it with Ollama.
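
A minimal sketch of the Ollama route, assuming AutoGen 0.4 or later with the Ollama extra installed (pip install "autogen-ext[ollama]"), a local Ollama server running, and a Mistral model already pulled (ollama pull mistral). The model tag "mistral" is an assumption here; use whatever tag you pulled:

import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.ollama import OllamaChatCompletionClient


async def main() -> None:
    # OllamaChatCompletionClient implements the ChatCompletionClient
    # interface that AssistantAgent expects, unlike a raw transformers model.
    model_client = OllamaChatCompletionClient(model="mistral")
    assistant = AssistantAgent(name="assistant", model_client=model_client)
    result = await assistant.run(task="What's the capital of India?")
    # The last message in the TaskResult is the assistant's reply.
    print(result.messages[-1].content)


asyncio.run(main())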

Ohh, okay, then I’ll try with the Llama model. Thank you.
