langchain agent_executor.astream_events 与 ChatGroq 不兼容

6uxekuva  于 2个月前  发布在  其他
关注(0)|答案(1)|浏览(31)

检查其他资源

  • 我为这个问题添加了一个非常描述性的标题。
  • 我使用LangChain文档的集成搜索来查找相关问题,但没有找到。
  • 我确信这是LangChain中的一个错误,而不是我的代码。
  • 该错误未通过更新到LangChain的最新稳定版本(或特定集成包)得到解决。

示例代码

import asyncio

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain.prompts import ChatPromptTemplate
# Fix: the class is DuckDuckGoSearchRun; the original imported the module-level
# name `ddg_search` and then referenced DuckDuckGoSearchRun, which was never
# imported (NameError).
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_groq import ChatGroq

# streaming=True is what triggers the reported AttributeError in
# ChatGroq._astream — keep it so the script still reproduces the bug.
llm = ChatGroq(
    temperature=0,
    model_name="llama-3.1-70b-versatile",
    api_key="",  # supply a real Groq API key here
    streaming=True,
)
ddg_search = DuckDuckGoSearchRun()
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful Search Assistant"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)
tools = [ddg_search]
search_agent = create_tool_calling_agent(llm, tools, prompt)
search_agent_executor = AgentExecutor(
    agent=search_agent,
    tools=tools,
    verbose=False,
    handle_parsing_errors=True,
)


async def main() -> None:
    """Stream agent events and print chat-model token chunks as they arrive.

    Fix: a bare top-level ``async for`` is a SyntaxError in a plain script
    (it only works in an async-aware REPL/notebook), and the original paste
    had the event handling dedented outside the loop body.
    """
    async for event in search_agent_executor.astream_events(
        {"input": "who is narendra modi"}, version="v1"
    ):
        kind = event["event"]
        # Only chat-model token chunks are printed; other event kinds are ignored.
        if kind == "on_chat_model_stream":
            content = event["data"]["chunk"].content
            if content:
                print(content, end="", flush=True)


asyncio.run(main())

错误信息和堆栈跟踪(如果适用)

AttributeError Traceback (most recent call last)
 File "<ipython-input-37>", line 1, in <module>
 async for event in search_agent_executor.astream_events(
     {"input": "who is narendra modi"}, version="v1"
 ):
NotImplementedError: Only versions "v1" and "v2" of the schema is currently supported.
1485 callbacks=run_manager.get_child() if run_manager else None,
1486 **inputs,
1487 )
1488 except OutputParserException as e:
1490 if isinstance(self.handle_parsing_errors, bool):
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain\agents\agent.py:619, in RunnableMultiActionAgent.aplan(self, intermediate_steps, callbacks, **kwargs)
611 final_output: Any = None
612 if self.stream_runnable:
613 # Use streaming to make sure that the underlying LLM is invoked in a
614 # streaming
(...)
617 # Because the response from the plan is not a generator, we need to
618 # accumulate the output into final output and return that.
-> 619 async for chunk in self.runnable.astream(
620 inputs, config={"callbacks": callbacks}
621 )
622 if final_output is None:
623 final_output = chunk
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain_core\runnables\base.py:3278, in RunnableSequence.astream(self, input, config, **kwargs)
3275 async def input_aiter() -> AsyncIterator[Input]:
3276 yield input
-> 3278 async for chunk in self.atransform(input_aiter(), config, **kwargs):
3279 yield chunk
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain_core\runnables\base.py:3261, in RunnableSequence.atransform(self, input, config, **kwargs)
3255 async def atransform(
3256 self,
3257 input: AsyncIterator[Input],
3258 config: Optional[RunnableConfig] = None,
3259 **kwargs: Optional[Any],
3260 ) -> AsyncIterator[Output]:
-> 3261 async for chunk in self._atransform_stream_with_config(
3262 input,
3263 self._atransform,
3264 patch_config(config, run_name=(config or {}).get("run_name") or self.name),
3265 **kwargs,
3266 ):
3267 yield chunk
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain_core\tracers\log_stream.py:258, in LogStreamCallbackHandler.tap_output_aiter(self, run_id, output)
246 async def tap_output_aiter(
247 self, run_id: UUID, output: AsyncIterator[T]
248 ) -> AsyncIterator[T]:
(...)
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain_core\runnables\base.py:3231, in RunnableSequence._atransform(self, input, run_manager, config, **kwargs)
3229 else:
3230 final_pipeline = step.atransform(final_pipeline, config)
-> 3231 async for output in final_pipeline:
3232 yield output
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain_core\runnables\base.py:1313, in Runnable.atransform(self, input, config, **kwargs)
1310 final: Input
1311 got_first_val = False
-> 1312 final = ichunk
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain_core\runnables\base.py:5276, in RunnableBindingBase.atransform(self, input, config, **kwargs)
5270 async def atransform(
5271 self,
5272 input: AsyncIterator[Input],
5273 config: Optional[RunnableConfig] = None,
5274 **kwargs: Any,
5275 ) -> AsyncIterator[Output]:
-> 5276 async for item in self.bound.atransform(
5277 input,
5278 self._merge_configs(config),
5279 **{**self.kwargs, **kwargs},
5280 ):
5281 yield item
File d:\Learning\Groq-Tool-Calling.venv\Lib\site-packages\langchain_core\runnables\base.py:1331, in Runnable.atransform(self, input, config, **kwargs)
1328 final = ichunk
1330 if got_first_val:
-> 1331 async for output in self.astream(final, config, **kwargs):
1332 yield output
File d:\Learning\Groq-Tool-Calling\.venv\Lib\site-packages\langchain_core\language_models\chat_models.py:417, in BaseChatModel.astream(self, input, config, stop, **kwargs)
415 generation: Optional[ChatGenerationChunk] = None
416 try:
-> 417 async for chunk in self._astream(
418 messages,
419 stop=stop,
420 **kwargs,
421 ):
422 if chunk.message.id is None:
423 chunk.message.id = f"run-{run_manager.run_id}"
File d:\Learning\Groq-Tool-Calling\.venv\Lib\site-packages\langchain_groq\chat_models.py:582, in ChatGroq._astream(self, messages, stop, run_manager, **kwargs)
578 if "tools" in kwargs:
579 response = await self.async_client.create(
580 messages=message_dicts, **{**params, **kwargs}
581 )
-> 582 chat_result = self._create_chat_result(response)
583 generation = chat_result.generations[0]
584 message = cast(AIMessage, generation.message)
File d:\Learning\Groq-Tool-Calling\.venv\Lib\site-packages\langchain_groq\chat_models.py:665, in ChatGroq._create_chat_result(self, response)
663 generations = []
664 if not isinstance(response, dict):
-> 665 response = response.dict()
666 token_usage = response.get("usage", {})
667 for res in response["choices"]:
AttributeError: 'AsyncStream' object has no attribute 'dict'
mkh04yzy

mkh04yzy1#

你好,谢谢你报告这个问题。我能够复现这个问题。我相信在初始化聊天模型时省略 streaming=True 可以解决这个问题。
这仍然是一个bug,但希望这能解决你的用例。如果你继续遇到问题,请告诉我。
最小示例:

import asyncio

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_groq import ChatGroq


@tool
def magic_function(input: int) -> int:
    """Applies a magic function to an input."""
    return input + 2


tools = [magic_function]
# NOTE: no streaming=True here — omitting it is the suggested workaround for
# the AttributeError ('AsyncStream' object has no attribute 'dict').
llm = ChatGroq(model="llama-3.1-70b-versatile").bind_tools(tools)

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful Search Assistant"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)

agent = create_tool_calling_agent(llm, tools, prompt)

agent_executor = AgentExecutor(
    agent=agent, tools=tools, verbose=False,
)

events = []


async def main() -> None:
    """Collect every streamed agent event into the module-level `events` list.

    Fix: a bare top-level ``async for`` is a SyntaxError in a plain .py
    module (it only runs in an async-aware REPL/notebook), so the loop is
    wrapped in a coroutine driven by ``asyncio.run``.
    """
    async for event in agent_executor.astream_events(
        {"input": "what is the value of magic_function(3)?"},
        version="v1",
    ):
        events.append(event)


asyncio.run(main())

相关问题