python ImportError: cannot import name 'load_index_from_storage' from 'llama_index'

66bbxpm5 · asked 2023-05-16 · Python

I hit this error while trying to run the example code from the LlamaIndex 0.6.5 GitHub repository. How do I correctly import load_index_from_storage? Or is there an alternative?
Language: Python. IDE: VS Code.

from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext
from IPython.display import Markdown, display
import openai
import os
import sys

os.environ['OPENAI_API_KEY'] = "sk-***"

documents = SimpleDirectoryReader('data').load_data()
index = GPTVectorStoreIndex.from_documents(documents)

# save index to disk
index.set_index_id("vector_index")
index.storage_context.persist('storage')

# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='storage')
# load index
index = load_index_from_storage(storage_context, index_id="vector_index")

# set Logging to DEBUG for more detailed outputs
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)

hjzp0vay · answer #1

Here is the code, updated against the llama_index documentation, for question answering.

from langchain import OpenAI
from llama_index import (
    ServiceContext,
    GPTVectorStoreIndex,
    LLMPredictor,
    PromptHelper,
    SimpleDirectoryReader,
    StorageContext,
    load_index_from_storage,
)
from llama_index.node_parser import SimpleNodeParser
import os

os.environ["OPENAI_API_KEY"] = "your api key"  # key used for gpt-3.5-turbo

# Only needed for the commented-out node-based indexing path below
parser = SimpleNodeParser()


def construct_index(directory_path):
    # Prompt/context settings for the LLM
    max_input_size = 4096
    num_outputs = 500
    max_chunk_overlap = 256
    chunk_size_limit = 1024

    print("*" * 5, "Documents parsing initiated", "*" * 5)
    file_metadata = lambda x: {"filename": x}
    reader = SimpleDirectoryReader(directory_path, file_metadata=file_metadata)
    documents = reader.load_data()

    # Alternative: build the index from parsed nodes instead of raw documents
    # nodes = parser.get_nodes_from_documents(documents)
    # index = GPTVectorStoreIndex(nodes)
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=num_outputs))

    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)

    # print("*" * 5, "Index creation initiated", "*" * 5)
    index = GPTVectorStoreIndex.from_documents(
        documents=documents, service_context=service_context
    )
    # print("*" * 5, "Index created", "*" * 5)

    # Persist the index to disk so it can be reloaded with load_index_from_storage
    index.storage_context.persist("./entire_docs")
    return index
    
construct_index("./docs")
storage_context = StorageContext.from_defaults(persist_dir="./entire_docs")
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
while True:
    text_input = input("YOU : ")
    response = query_engine.query(text_input)
    print("Bot : ", response)
    print('\n')

The code above works with llama_index==0.6.1.
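
If the ImportError still shows up, the installed package is most likely an older release than the one the example targets. A quick check is sketched below; it assumes the package exposes __version__ and that the 0.6.x releases also define the loader in the llama_index.indices.loading submodule, so treat the fallback path as an assumption rather than guaranteed API.

import llama_index
print(llama_index.__version__)  # confirm which release is actually installed

try:
    # top-level export, available in recent 0.6.x releases
    from llama_index import load_index_from_storage
except ImportError:
    # assumed fallback: the submodule where the loader is defined in 0.6.x
    from llama_index.indices.loading import load_index_from_storage

If the fallback import also fails, upgrading the package (pip install -U llama_index) should bring in a release that exports load_index_from_storage at the top level.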
