python-3.x: "no attribute" error when creating a chat agent with langchain and openai

bqjvbblv · published 2023-11-20 in Python
Follow (0) | Answers (2) | Views (225)

I'm trying to test a chat agent using the Python code below. I'm using a langchain agent together with langchain Tools. I defined a couple of simple functions that the LLM should use as tools whenever the prompt mentions something related to them, and I'm using the OpenAI gpt-3.5-turbo model as the LLM. When I run conversational_agent with a simple prompt asking for a random number, which the function defined for the tool should handle easily, I get the error below saying that openai has no attribute. Does anyone see what the problem might be, and can you suggest how to fix it?
Code:

from config import api_key, new_personal_api_key

apikey = new_personal_api_key

# apikey = api_key

import os

os.environ['OPENAI_API_KEY'] = apikey

from langchain.chains.conversation.memory import ConversationBufferWindowMemory

from langchain.agents import Tool
from langchain.tools import BaseTool

def meaning_of_life(input=""):
    return 'The meaning of life is 42 if rounded but is actually 42.17658'

life_tool = Tool(
    name='Meaning of Life',
    func=meaning_of_life,
    description="Useful for when you need to answer questions about the meaning of life. Input should be MOL."
)

import random

def random_num(input=""):
    return random.randint(0, 5)

random_tool = Tool(
    name='Random number',
    func=random_num,
    description="Useful for when you need to get a random number. Input should be 'random'."
)

from langchain.chat_models import ChatOpenAI

# Set up the turbo LLM
turbo_llm = ChatOpenAI(
    temperature=0,
    model_name='gpt-3.5-turbo'
)


from langchain.agents import initialize_agent

tools = [random_tool, life_tool]

# conversational agent memory
memory = ConversationBufferWindowMemory(
    memory_key='chat_history',
    k=3,
    return_messages=True
)

# create our agent
conversational_agent = initialize_agent(
    agent='chat-conversational-react-description',
    tools=tools,
    llm=turbo_llm,
#     llm=local_llm,
    verbose=True,
    max_iterations=3,
    early_stopping_method='generate',
    memory=memory,
    handle_parsing_errors=True
)

conversational_agent('Can you give me a random number?')

Error:

> Entering new AgentExecutor chain...

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[12], line 1
----> 1 conversational_agent('Can you give me a random number?')

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:310, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
    308 except BaseException as e:
    309     run_manager.on_chain_error(e)
--> 310     raise e
    311 run_manager.on_chain_end(outputs)
    312 final_outputs: Dict[str, Any] = self.prep_outputs(
    313     inputs, outputs, return_only_outputs
    314 )

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:304, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
    297 run_manager = callback_manager.on_chain_start(
    298     dumpd(self),
    299     inputs,
    300     name=run_name,
    301 )
    302 try:
    303     outputs = (
--> 304         self._call(inputs, run_manager=run_manager)
    305         if new_arg_supported
    306         else self._call(inputs)
    307     )
    308 except BaseException as e:
    309     run_manager.on_chain_error(e)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/agents/agent.py:1146, in AgentExecutor._call(self, inputs, run_manager)
   1144 # We now enter the agent loop (until it returns something).
   1145 while self._should_continue(iterations, time_elapsed):
-> 1146     next_step_output = self._take_next_step(
   1147         name_to_tool_map,
   1148         color_mapping,
   1149         inputs,
   1150         intermediate_steps,
   1151         run_manager=run_manager,
   1152     )
   1153     if isinstance(next_step_output, AgentFinish):
   1154         return self._return(
   1155             next_step_output, intermediate_steps, run_manager=run_manager
   1156         )

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/agents/agent.py:933, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
    930     intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
    932     # Call the LLM to see what to do.
--> 933     output = self.agent.plan(
    934         intermediate_steps,
    935         callbacks=run_manager.get_child() if run_manager else None,
    936         **inputs,
    937     )
    938 except OutputParserException as e:
    939     if isinstance(self.handle_parsing_errors, bool):

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/agents/agent.py:546, in Agent.plan(self, intermediate_steps, callbacks, **kwargs)
    534 """Given input, decided what to do.
    535 
    536 Args:
   (...)
    543     Action specifying what tool to use.
    544 """
    545 full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
--> 546 full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
    547 return self.output_parser.parse(full_output)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/llm.py:298, in LLMChain.predict(self, callbacks, **kwargs)
    283 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
    284     """Format prompt with kwargs and pass to LLM.
    285 
    286     Args:
   (...)
    296             completion = llm.predict(adjective="funny")
    297     """
--> 298     return self(kwargs, callbacks=callbacks)[self.output_key]

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:310, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
    308 except BaseException as e:
    309     run_manager.on_chain_error(e)
--> 310     raise e
    311 run_manager.on_chain_end(outputs)
    312 final_outputs: Dict[str, Any] = self.prep_outputs(
    313     inputs, outputs, return_only_outputs
    314 )

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:304, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
    297 run_manager = callback_manager.on_chain_start(
    298     dumpd(self),
    299     inputs,
    300     name=run_name,
    301 )
    302 try:
    303     outputs = (
--> 304         self._call(inputs, run_manager=run_manager)
    305         if new_arg_supported
    306         else self._call(inputs)
    307     )
    308 except BaseException as e:
    309     run_manager.on_chain_error(e)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/llm.py:108, in LLMChain._call(self, inputs, run_manager)
    103 def _call(
    104     self,
    105     inputs: Dict[str, Any],
    106     run_manager: Optional[CallbackManagerForChainRun] = None,
    107 ) -> Dict[str, str]:
--> 108     response = self.generate([inputs], run_manager=run_manager)
    109     return self.create_outputs(response)[0]

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/llm.py:120, in LLMChain.generate(self, input_list, run_manager)
    118 callbacks = run_manager.get_child() if run_manager else None
    119 if isinstance(self.llm, BaseLanguageModel):
--> 120     return self.llm.generate_prompt(
    121         prompts,
    122         stop,
    123         callbacks=callbacks,
    124         **self.llm_kwargs,
    125     )
    126 else:
    127     results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
    128         cast(List, prompts), {"callbacks": callbacks}
    129     )

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:459, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    451 def generate_prompt(
    452     self,
    453     prompts: List[PromptValue],
   (...)
    456     **kwargs: Any,
    457 ) -> LLMResult:
    458     prompt_messages = [p.to_messages() for p in prompts]
--> 459     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:349, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs)
    347         if run_managers:
    348             run_managers[i].on_llm_error(e)
--> 349         raise e
    350 flattened_outputs = [
    351     LLMResult(generations=[res.generations], llm_output=res.llm_output)
    352     for res in results
    353 ]
    354 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:339, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs)
    336 for i, m in enumerate(messages):
    337     try:
    338         results.append(
--> 339             self._generate_with_cache(
    340                 m,
    341                 stop=stop,
    342                 run_manager=run_managers[i] if run_managers else None,
    343                 **kwargs,
    344             )
    345         )
    346     except BaseException as e:
    347         if run_managers:

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:492, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    488     raise ValueError(
    489         "Asked to cache, but no cache found at `langchain.cache`."
    490     )
    491 if new_arg_supported:
--> 492     return self._generate(
    493         messages, stop=stop, run_manager=run_manager, **kwargs
    494     )
    495 else:
    496     return self._generate(messages, stop=stop, **kwargs)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/openai.py:365, in ChatOpenAI._generate(self, messages, stop, run_manager, stream, **kwargs)
    363 message_dicts, params = self._create_message_dicts(messages, stop)
    364 params = {**params, **kwargs}
--> 365 response = self.completion_with_retry(
    366     messages=message_dicts, run_manager=run_manager, **params
    367 )
    368 return self._create_chat_result(response)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/openai.py:297, in ChatOpenAI.completion_with_retry(self, run_manager, **kwargs)
    293 def completion_with_retry(
    294     self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
    295 ) -> Any:
    296     """Use tenacity to retry the completion call."""
--> 297     retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
    299     @retry_decorator
    300     def _completion_with_retry(**kwargs: Any) -> Any:
    301         return self.client.create(**kwargs)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/openai.py:77, in _create_retry_decorator(llm, run_manager)
     68 def _create_retry_decorator(
     69     llm: ChatOpenAI,
     70     run_manager: Optional[
     71         Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
     72     ] = None,
     73 ) -> Callable[[Any], Any]:
     74     import openai
     76     errors = [
---> 77         openai.error.Timeout,
     78         openai.error.APIError,
     79         openai.error.APIConnectionError,
     80         openai.error.RateLimitError,
     81         openai.error.ServiceUnavailableError,
     82     ]
     83     return create_base_retry_decorator(
     84         error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
     85     )

AttributeError: module 'openai' has no attribute 'error'
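
The bottom frame of the traceback is langchain's _create_retry_decorator, which reads openai.error.Timeout and friends. A quick diagnostic (a hypothetical sketch, assuming only the packages imported above are installed) shows whether the installed openai module still exposes that attribute:

import openai
import langchain

print(openai.__version__)        # a 1.x version here reproduces the failure
print(langchain.__version__)
print(hasattr(openai, "error"))  # False on openai>=1.0, the attribute langchain expects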


8ljdwjyq1#

This seems to be a version-incompatibility problem. I checked the openai package on PyPI:

[screenshot of the openai release history on PyPI]

It looks like it was updated just yesterday. langchain probably hasn't been updated yet, so the two libraries conflict. If you downgrade your openai SDK, it should work.
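
Concretely, openai 1.0.0 (released in early November 2023) removed the openai.error module, while this langchain release still imports it in _create_retry_decorator, which is exactly where the traceback ends. A sketch of the downgrade, assuming 0.28.1 is the last pre-1.0 release:

pip install "openai==0.28.1"

Pinning the version in your requirements keeps a later blanket upgrade from reintroducing the mismatch.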


qc6wkl3g2#

This looks like an error triggered when the network is not connected, which makes langchain raise an openai error, but the openai module has no `error` attribute. Please update your openai version via pip install -U openai.
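
Note that upgrading openai by itself will not clear this particular traceback: openai>=1.0 is only supported by newer langchain releases (roughly 0.0.330 and later, where ChatOpenAI stopped importing openai.error), so both packages have to move together. A sketch, with the version bounds as assumptions rather than exact minimums:

pip install -U "langchain>=0.0.330" "openai>=1.0"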
