from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo

# Describe the metadata fields so the auto-retriever can infer filters from a query.
vector_store_info = VectorStoreInfo(
    content_info="Github Issues",
    metadata_info=[
        MetadataInfo(name="state", description="Whether the issue is `open` or `closed`", type="string"),
        MetadataInfo(name="year", description="The year the issue was created", type="integer"),
        MetadataInfo(name="month", description="The month the issue was created", type="integer"),
        MetadataInfo(name="day", description="The day the issue was created", type="integer"),
        MetadataInfo(name="assignee", description="The assignee of the ticket", type="string"),
        MetadataInfo(name="size", description="How big the issue is (XS, S, M, L, XL, XXL)", type="string"),
    ],
)
from llama_index.core.retrievers import VectorIndexAutoRetriever

# Build an auto-retriever that turns a natural-language query into a vector
# query plus metadata filters.
retriever = VectorIndexAutoRetriever(
    index,
    vector_store_info=vector_store_info,
    similarity_top_k=2,
    empty_query_top_k=10,  # used when the inferred query is filter-only
    verbose=True,
)
Let's try it out:
from llama_index.core import QueryBundle
nodes = retriever.retrieve(QueryBundle("Tell me about some issues on 01/11"))
print(f"Number of source nodes: {len(nodes)}")
print(nodes[0].node.metadata)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
response = query_engine.query("Tell me about some issues on 01/11")
print(str(response))
response = query_engine.query("Tell me about some open issues related to agents")
print(str(response))
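As a small supplementary check (not part of the original post), you can inspect which nodes the auto-retriever actually selected for the last query, which shows whether the inferred metadata filters behaved as expected:

# Inspect the retrieved source nodes behind the last response.
for node_with_score in response.source_nodes:
    print(node_with_score.score, node_with_score.node.metadata)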
1 Answer
To run multi-document retrieval-augmented generation (RAG) with Weaviate as the vector store, follow these steps:
By following these steps you can set up a multi-document RAG system with Weaviate as the vector store. In addition, the MultiDocAutoRetrieverPack class from the LlamaIndex library can simplify the process: it indexes the metadata nodes and the document chunks separately and enables recursive retrieval for more efficient querying.
References:
1. https://github.com/run-llama/llama_index/blob/main/docs/docs/examples/query_engine/multi_doc_auto_retrieval/multi_doc_auto_retrieval.ipynb
2. https://github.com/run-llama/llama_index/blob/main/llama-index-packs/llama-index-packs-multidoc-autoretrieval/llama_index/packs/multidoc_autoretrieval/base.py
3. https://github.com/run-llama/llama_index/blob/main/llama-index-legacy/llama_index/legacy/vector_stores/weaviate.py
4. https://github.com/run-llama/llama_index/blob/main/llama-index-integrations/vector_stores/llama-index-vector-stores-weaviate/llama_index/vector_stores/weaviate/base.py