Streamlit Stream Callback in Haystack 2.x

zy1mlcev · posted 5 months ago

Describe the solution you'd like

When using OpenAIChatGenerator, I would like to watch the streamed response live in a Streamlit app.

Describe alternatives you've considered

I have tried the following streaming_callback functions. (I've only included the relevant pieces of code; I can provide the full code if needed.)

import streamlit as st
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.dataclasses import ChatMessage, StreamingChunk

# Method 1: nothing is shown in the UI. The callback is invoked for its
# side effects only, so the generator it returns is never consumed.
def st_streaming_callback(chunk: StreamingChunk):
    yield chunk.content

# Method 2: instead of a typewriter effect, each chunk is rendered as a
# separate one-line st.write element.
def st_streaming_callback(chunk: StreamingChunk):
    st.write(chunk.content)

llm_chat = OpenAIChatGenerator(
    model="gpt-4-turbo-preview",
    streaming_callback=st_streaming_callback,
    generation_kwargs=None,
)

# Note: run() returns a dict, not a generator, so st.write_stream has
# nothing to iterate over incrementally.
response_dict = st.write_stream(
    llm_chat.run(
        messages=[
            ChatMessage(role=message.role, content=message.content, name=None)
            for message in st.session_state.messages
        ]
    )
)
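
For reference, one pattern that does make st.write_stream usable with Haystack's callback-based streaming is to bridge the two with a queue: the callback pushes each chunk's content onto a queue from a worker thread, while a plain generator drains the queue for Streamlit to render. Below is a minimal sketch assuming Haystack 2.x import paths and an OPENAI_API_KEY in the environment; chunk_queue, run_model, and chunk_generator are illustrative names, not part of any library API.

import queue
import threading

import streamlit as st
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.dataclasses import ChatMessage, StreamingChunk

# Illustrative bridge: the callback feeds a queue; a generator drains it.
chunk_queue = queue.Queue()

def queue_streaming_callback(chunk: StreamingChunk):
    # Haystack calls this for every streamed chunk.
    chunk_queue.put(chunk.content)

llm_chat = OpenAIChatGenerator(
    model="gpt-4-turbo-preview",
    streaming_callback=queue_streaming_callback,
)

def run_model(messages):
    # run() blocks until generation finishes, so execute it in a worker thread.
    llm_chat.run(messages=messages)
    chunk_queue.put(None)  # Sentinel: no more chunks.

def chunk_generator():
    # Yield chunks until the sentinel arrives; st.write_stream consumes this.
    while (token := chunk_queue.get()) is not None:
        yield token

messages = [ChatMessage.from_user("Write a haiku about streaming.")]
threading.Thread(target=run_model, args=(messages,), daemon=True).start()
st.write_stream(chunk_generator())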

Additional context

Add any other context or screenshots about the feature request here.


arknldoa #1

This is an interesting use case, but it needs some investigation. I think we should provide a guide (+ code example) on how to handle streaming. An example with Streamlit would be helpful.


jfgube3f #2

That would be perfect, thank you.


lymgl2op #3

Hi! I was wrestling with this exact problem.
Here is a personal example. I'm using OllamaChatGenerator, but the same principle should apply to OpenAI's chat generator.
Happy to help with the documentation or anything else!

Class file using OllamaChatGenerator:

import streamlit as st
from haystack_integrations.components.generators.ollama import OllamaChatGenerator
from haystack.dataclasses import StreamingChunk, ChatMessage

class ConversationalChatbot:
    """
    A conversational chatbot which will stream responses to the Streamlit UI.
    """

    def __init__(self):
        """
        Initialize the chatbot with a language model and a default system message.
        """
        self.large_language_model = OllamaChatGenerator(
            model='orca-mini',
            url='http://localhost:11434/api/chat',
            streaming_callback=self.streamlit_write_streaming_chunk
        )

        self.messages = [ChatMessage.from_system("\nYou are a helpful, respectful and honest assistant")]

    def query(self, query: str) -> str:
        """
        Run a query and return the response from the language model.

        Args:
            query (str): The user's query string.

        Returns:
            str: The assistant's response.
        """
        # Create a new Streamlit container for the AI's response.
        self.placeholder = st.empty()

        # Initialize an empty list for response tokens.
        self.tokens = []

        # Add the user's query to the chat history.
        self.messages.append(ChatMessage.from_user(query))

        # Send the chat history to the language model and get the response.
        response = self.large_language_model.run(self.messages)

        # Check if the response contains valid replies.
        if 'replies' in response:
            response_content = response['replies'][0].content
            # Add the assistant's response to the chat history.
            self.messages.append(ChatMessage.from_assistant(response_content))
            return response_content
        else:
            raise Exception('No valid response or unexpected response format.')

    def streamlit_write_streaming_chunk(self, chunk: StreamingChunk):
        """
        Stream a response chunk to the Streamlit UI.

        Args:
            chunk (StreamingChunk): The streaming chunk from the language model.
        """
        # Append the latest streaming chunk to the tokens list.
        self.tokens.append(chunk.content)

        # Update the Streamlit container with the current stream of tokens.
        self.placeholder.write("".join(self.tokens))

    def add_message_to_chat_history(self, message: ChatMessage):
        """
        Add a message to the chat history.

        Args:
            message (ChatMessage): The message to add to the chat history.
        """
        self.messages.append(message)
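
The key to the typewriter effect above is that the streaming callback is a bound method: it keeps references to self.placeholder and self.tokens, so each incoming chunk is appended to the token list and the accumulated text is re-rendered into the same st.empty() container in place.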

And here is the Streamlit 'app' file:

import streamlit as st
from conversational_pipeline import ConversationalChatbot

# Constants to store key names in the config dictionary
TITLE_NAME = 'title_name'
UI_RENDERED_MESSAGES = 'ui_rendered_messages'
CHAT_HISTORY = 'chat_history'
CONVERSATIONAL_PIPELINE = 'conversational_pipeline'

def main():
    """
    Render the retrieval augmented generation (RAG) chatbot application.
    """
    config = load_config()
    initialize_session_state(config)
    setup_page()
    render_chat_history()
    manage_chat()

def load_config():
    """
    Load the application configuration from a file or object.

    Returns:
        dict: Configuration dictionary containing the title name,
        UI rendered messages, chat history, and conversational pipeline instance.
    """
    return {
        TITLE_NAME: 'Haystack Streaming Example',
        UI_RENDERED_MESSAGES: [],
        CHAT_HISTORY: [],
        CONVERSATIONAL_PIPELINE: ConversationalChatbot()
    }

def setup_page():
    """
    Set the Streamlit page configuration and title.
    """
    st.set_page_config(page_title=st.session_state[TITLE_NAME])
    st.title(st.session_state[TITLE_NAME])

def initialize_session_state(config):
    """
    Initialize Streamlit session state variables using the provided configuration.

    Args:
        config (dict): Configuration dictionary.
    """
    for key, value in config.items():
        if key not in st.session_state:
            st.session_state[key] = value

def manage_chat():
    """
    Handle user interaction with the conversational AI and render
    the user query along with the AI response.
    """
    if prompt := st.chat_input('What can we help you with?'):
        # Render the user message.
        with st.chat_message('user'):
            st.markdown(prompt)
        st.session_state[UI_RENDERED_MESSAGES].append({'role': 'user', 'content': prompt})

        # Render the AI assistant's response.
        with st.chat_message('assistant'):
            with st.spinner('Generating response . . .'):
                response = st.session_state[CONVERSATIONAL_PIPELINE].query(prompt)
        st.session_state[UI_RENDERED_MESSAGES].append({'role': 'assistant', 'content': response})

def render_chat_history():
    """
    Display the chat message history stored in session state.
    """
    for message in st.session_state[UI_RENDERED_MESSAGES]:
        with st.chat_message(message['role']):
            st.markdown(message['content'])

if __name__ == '__main__':
    main()
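
To try it locally, save the class above as conversational_pipeline.py and the app file as app.py (these filenames match the import above but are otherwise an assumption), make sure an Ollama server is reachable at localhost:11434 with the orca-mini model pulled, and start the app with `streamlit run app.py`.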

haystack-streamlit-with-streaming.mp4


ev7lccsx #4

Thanks T-Visor for the code, it runs perfectly on my side. I hope it gets natively integrated into Haystack.


t9eec4r0 #5

@ilkersigirci You're welcome! It's a bit of a hack, but I'm glad it works. Hope it helps you.
