File "/home/Test/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 166, in invoke
self.generate_prompt(
File "/home/Test/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 544, in generate_prompt
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File "/home/Test/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 408, in generate
raise e
File "/home/Test/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 398, in generate
self._generate_with_cache(
File "/home/Test/.local/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 577, in _generate_with_cache
return self._generate(
File "/home/Test/.local/lib/python3.10/site-packages/langchain_community/chat_models/ollama.py", line 255, in _generate
final_chunk = self._chat_stream_with_aggregation(
File "/home/Test/.local/lib/python3.10/site-packages/langchain_community/chat_models/ollama.py", line 188, in _chat_stream_with_aggregation
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
File "/home/Test/.local/lib/python3.10/site-packages/langchain_community/chat_models/ollama.py", line 161, in _create_chat_stream
yield from self._create_stream(
File "/home/Test/.local/lib/python3.10/site-packages/langchain_community/llms/ollama.py", line 240, in _create_stream
raise ValueError(
ValueError: Ollama call failed with status code 400. Details: unexpected server status: 1
The Ollama log shows level=ERROR source=prompt.go:86 msg="failed to encode prompt" err="unexpected server status: 1", in case that helps anyone. I can dig deeper if needed, since this is very reproducible for me, even with ".with_retry()".
from pprint import pprint

# Compile the LangGraph workflow (defined earlier in the notebook)
app = workflow.compile()

# Test
inputs = {"question": "What are the types of agent memory?"}
for output in app.stream(inputs):  # throws the same error
    pprint(output)
Edit: providing the full error traceback as requested by @SinghJivjot:
Traceback (most recent call last):
File "/home/yan/code_ws/rag-experiment/langgraph_rag_agent_llama3.py", line 421, in <module>
for output in app.stream(inputs):
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 710, in stream
_panic_or_proceed(done, inflight, step)
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langgraph/pregel/__init__.py", line 1126, in _panic_or_proceed
raise exc
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2499, in invoke
input = step.invoke(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 3963, in invoke
return self._call_with_config(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 1626, in _call_with_config
context.run(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/runnables/config.py", line 347, in call_func_with_variable_args
return func(input, **kwargs) # type: ignore[call-arg]
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 3837, in _invoke
output = call_func_with_variable_args(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/runnables/config.py", line 347, in call_func_with_variable_args
return func(input, **kwargs) # type: ignore[call-arg]
File "/home/yan/code_ws/rag-experiment/langgraph_rag_agent_llama3.py", line 247, in grade_documents
score = retrieval_grader.invoke({"question": question, "document": d.page_content})
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/runnables/base.py", line 2499, in invoke
input = step.invoke(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 158, in invoke
self.generate_prompt(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 560, in generate_prompt
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 421, in generate
raise e
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 411, in generate
self._generate_with_cache(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py", line 632, in _generate_with_cache
result = self._generate(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_community/chat_models/ollama.py", line 259, in _generate
final_chunk = self._chat_stream_with_aggregation(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_community/chat_models/ollama.py", line 190, in _chat_stream_with_aggregation
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_community/chat_models/ollama.py", line 162, in _create_chat_stream
yield from self._create_stream(
File "/home/yan/miniconda3/envs/oscopilot/lib/python3.10/site-packages/langchain_community/llms/ollama.py", line 251, in _create_stream
raise ValueError(
ValueError: Ollama call failed with status code 400. Details: {"error":"unexpected server status: 1"}
root@8cb277eb03b5:/home/python# python test.py
Traceback (most recent call last):
File "/home/python/test.py", line 24, in <module>
result = smart_scraper_graph.run()
File "/usr/local/lib/python3.9/site-packages/scrapegraphai/graphs/smart_scraper_graph.py", line 116, in run
self.final_state, self.execution_info = self.graph.execute(inputs)
File "/usr/local/lib/python3.9/site-packages/scrapegraphai/graphs/base_graph.py", line 107, in execute
result = current_node.execute(state)
File "/usr/local/lib/python3.9/site-packages/scrapegraphai/nodes/generate_answer_node.py", line 141, in execute
answer = merge_chain.invoke(
File "/usr/local/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 2499, in invoke
input = step.invoke(
File "/usr/local/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 158, in invoke
self.generate_prompt(
File "/usr/local/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 560, in generate_prompt
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File "/usr/local/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 421, in generate
raise e
File "/usr/local/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 411, in generate
self._generate_with_cache(
File "/usr/local/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 632, in _generate_with_cache
result = self._generate(
File "/usr/local/lib/python3.9/site-packages/langchain_community/chat_models/ollama.py", line 259, in _generate
final_chunk = self._chat_stream_with_aggregation(
File "/usr/local/lib/python3.9/site-packages/langchain_community/chat_models/ollama.py", line 190, in _chat_stream_with_aggregation
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
File "/usr/local/lib/python3.9/site-packages/langchain_community/chat_models/ollama.py", line 162, in _create_chat_stream
yield from self._create_stream(
File "/usr/local/lib/python3.9/site-packages/langchain_community/llms/ollama.py", line 251, in _create_stream
raise ValueError(
ValueError: Ollama call failed with status code 400. Details: {"error":"unexpected server status: 1"}
9 answers
n3h0vuf21#
I have a temporary workaround until this is fixed upstream. You can add a fallback like this:
rag_chain_fallback = prompt | llm | StrOutputParser()
# Fall back to an identical copy of the chain, which effectively retries the failed call
rag_chain = rag_chain_fallback.with_fallbacks([rag_chain_fallback])
Or add a 'retry' option like this:
rag_chain = prompt | llm | StrOutputParser()
rag_chain = rag_chain.with_retry()
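For reference, a minimal sketch of how the two workarounds can be combined and invoked; the names prompt and llm, the retry count, and the input key are assumptions carried over from the notebook, not part of the original comment:
from langchain_core.output_parsers import StrOutputParser

# Build the chain once, retry transient failures, then fall back to the plain
# chain if the retries are exhausted.
base_chain = prompt | llm | StrOutputParser()  # prompt and llm come from the notebook
rag_chain = base_chain.with_retry(stop_after_attempt=3)
rag_chain = rag_chain.with_fallbacks([base_chain])

# The input keys must match the variables of the prompt template (assumed here).
answer = rag_chain.invoke({"question": "What are the types of agent memory?"})
print(answer)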
z18hc3ub2#
Thanks, I'll try it and post an update here if I run into any issues.
eagi6jfj3#
with_retry() is working well for now, thanks @Bassileios.
Other users, please confirm whether you are also hitting this issue. It helps to know I'm not the only one seeing it.
xkrw2x1b4#
@SinghJivjot I'm having this problem too.
Same error, happening in the same way.
imzjd6km5#
Exact error:
The Ollama log shows
level=ERROR source=prompt.go:86 msg="failed to encode prompt" err="unexpected server status: 1"
in case that helps anyone.
I can dig deeper if needed, since this is very reproducible for me, even with ".with_retry()".
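To help narrow down whether this is a LangChain problem or an Ollama server problem, here is a minimal sketch that sends the same kind of request straight to Ollama's /api/chat endpoint, bypassing LangChain; the host, port, and model name are assumptions, so adjust them to your setup:
import requests

# Hypothetical direct reproduction against the Ollama HTTP API; adjust the
# model name to whatever the failing notebook actually uses.
resp = requests.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "llama3",
        "messages": [{"role": "user", "content": "What are the types of agent memory?"}],
        "stream": False,
    },
    timeout=120,
)
print(resp.status_code)  # 400 here points at the server, not LangChain
print(resp.text)         # expected to contain "unexpected server status: 1" if it reproduces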
ecr0jaav6#
Retry works on all of the chains in the local Llama notebook except this one line:
Edit: providing the full error traceback as requested by @SinghJivjot.
The Ollama server log shows the same "failed to encode prompt" error:
mgdq6dx17#
@y22ma Please send the full trace
q9yhzks08#
Sometimes the problem can be resolved by updating Ollama to version 0.1.9.
hfwmuf9z9#
Same issue here; attaching my traceback: