from llama_index.legacy.prompts.default_prompt_selectors import DEFAULT_TEXT_QA_PROMPT_SEL
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer


class Accumulate(BaseSynthesizer):
    def __init__(self, text_qa_template=None, service_context=None, output_cls=None,
                 streaming=False, use_async=False):
        super().__init__(service_context=service_context, streaming=streaming)
        self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT_SEL
        self._use_async = use_async
        self._output_cls = output_cls

    def synthesize(self, documents):
        responses = []
        for doc in documents:
            if not doc.context:  # Skip the document if no context is provided
                continue
            response = self._service_context.llm.predict(
                self._text_qa_template, context_str=doc.context
            )
            responses.append(response)
        return self._accumulate_responses(responses)

    def _accumulate_responses(self, responses):
        # Join the per-document responses into a single accumulated answer
        return " ".join(responses)

from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.response_synthesizers import get_response_synthesizer, ResponseMode
from llama_index.core.service_context import ServiceContext


class CustomPromptTemplate(PromptTemplate):
    def partial_format(self, query_str: str) -> str:
        # Custom logic to check if the document provides context for the query
        return f"""
{{% if context_str contains '{query_str}' %}}
Answer the following question based on the context: {query_str}
{{% else %}}
No relevant context found.
{{% endif %}}
"""


# Use the custom prompt template in the Accumulate synthesizer
custom_prompt_template = CustomPromptTemplate(
    "{context_str}{query_str}", prompt_type="QUESTION_ANSWER"
)
service_context = ServiceContext.from_defaults()
accumulate_synthesizer = get_response_synthesizer(
    service_context=service_context,
    text_qa_template=custom_prompt_template,
    response_mode=ResponseMode.ACCUMULATE,
)

# Example usage
query_str = "What is?"
texts = [
    "This is bar",
    "This is foo",
]
response = accumulate_synthesizer.get_response(text_chunks=texts, query_str=query_str)
print(response)

from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.response_synthesizers.accumulate import Accumulate

# Define your custom prompt template
custom_prompt_str = (
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer the query in a detailed manner.\n"
    "Query: {query_str}\n"
    "Answer: "
)
custom_prompt = PromptTemplate(custom_prompt_str)

# Update the prompt in the Accumulate response synthesizer
accumulate_synthesizer = Accumulate()
accumulate_synthesizer.update_prompts(
    {"text_qa_template": custom_prompt}
)
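
A small follow-up sketch, continuing with the accumulate_synthesizer object from the snippet above: get_prompts, which belongs to the same prompt-mixin interface as update_prompts, returns the synthesizer's current prompt dictionary, so it can be used to confirm that text_qa_template now points at the custom template.

# Continues the example above: verify that the prompt update took effect
updated_prompts = accumulate_synthesizer.get_prompts()
print(updated_prompts["text_qa_template"].get_template())
# This should print the custom_prompt_str defined above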
5 Answers

c8ib6hqw1#

To modify the Accumulate response synthesizer so that documents are skipped entirely when no context is provided, you can add a check to the Accumulate class so that it only processes documents that have context. If no context is provided, it should skip those documents and not include any of their responses in the final output. The first code example at the top of this page shows one way to modify the Accumulate class.

In that example, the synthesize method of the Accumulate class is modified to check whether each document's context attribute is provided. If no context is provided (if not doc.context), the document is skipped (continue). This ensures that only documents with context are processed and included in the final output. 1(https://github.com/run-llama/llama_index/blob/main/llama-index-legacy/llama_index/legacy/response_synthesizers/accumulate.py) 2(https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/response_synthesizers/factory.py)
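
(A related sketch, not taken from the answer above: the current llama_index.core Accumulate synthesizer operates on plain text chunks rather than document objects with a context attribute, so a similar effect can be approximated by filtering out empty chunks before calling get_response. The chunk texts and query below are made-up illustrations, and the call assumes an LLM is configured via Settings, OpenAI by default.)

from llama_index.core.response_synthesizers import Accumulate

synthesizer = Accumulate()  # uses the LLM configured via llama_index Settings

# Hypothetical inputs: the second and third chunks carry no usable context
text_chunks = ["The Accumulate mode answers the query once per chunk.", "", "   "]

# Drop empty chunks before synthesis so no response is generated for them
non_empty_chunks = [chunk for chunk in text_chunks if chunk.strip()]

response = synthesizer.get_response(
    query_str="What does Accumulate mode do?",
    text_chunks=non_empty_chunks,
)
print(response)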
pgccezyw2#

Could we change the prompt instead of doing this?
30byixjq3#
Yes, you can change the prompt used by the Accumulate response synthesizer without modifying the Accumulate class, so that it skips generating responses for documents that provide no context for the query. You can do this by updating the prompt template to include logic that checks whether a document provides context for the query and skips generating a response when none is provided. The second code example above shows one way to modify the prompt template.

That custom prompt template uses a conditional to check whether context_str contains query_str. If it does, a response is generated; otherwise "No relevant context found." is returned. This way, you can skip generating responses for documents that provide no context for the query.
ao218c7q4#

How do I call the update_prompts method and pass the prompt to Accumulate?
f0brbegy5#

To call the update_prompts method and pass a custom prompt to the Accumulate response synthesizer, follow these steps: define your custom prompt template, then use the update_prompts method to update the prompt in the Accumulate response synthesizer. The third code example above shows this.

That code snippet defines a custom prompt template and uses the update_prompts method 1(https://github.com/run-llama/llama_index/blob/main/docs/docs/examples/prompts/prompt_mixin.ipynb) 2(https://github.com/run-llama/llama_index/blob/main/llama-index-legacy/llama_index/legacy/response_synthesizers/accumulate.py) 3(https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/response_synthesizers/accumulate.py) to update the text_qa_template in the Accumulate response synthesizer.
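
(A closing sketch that is not part of the answer above: instead of calling update_prompts after construction, the core Accumulate class also accepts a text_qa_template argument in its constructor, which achieves the same result in one step. The prompt text below is only an illustration.)

from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.response_synthesizers.accumulate import Accumulate

custom_prompt = PromptTemplate(
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Answer the query using only the context above.\n"
    "Query: {query_str}\n"
    "Answer: "
)

# Passing the template at construction time is equivalent to calling
# update_prompts({"text_qa_template": custom_prompt}) afterwards
synthesizer = Accumulate(text_qa_template=custom_prompt)
print(synthesizer.get_prompts()["text_qa_template"].get_template())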