OpenAI Conversation using ChatBox

Here’s a demo (note this is using the main branch on GitHub; ChatBox has a few issues in v1.1.0)

[demo GIF]

from typing import List, Dict

import param
import panel as pn
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

pn.extension(template="bootstrap")


class OpenAIConversation(param.Parameterized):
    openai_key = param.String(
        default=None,
        doc="""
        OpenAI API key; if not set, will try using env var.""",
    )
    temperature = param.Number(
        default=0.5,
        bounds=(0, 1),
        doc="""
        The model temperature; higher values will result in more random outputs.""",
    )

    def __init__(self, **params):
        super().__init__(**params)
        llm = OpenAI(temperature=self.temperature, openai_api_key=self.openai_key)
        self.chain = ConversationChain(llm=llm, memory=ConversationBufferMemory())
        self.chat_box = pn.widgets.ChatBox()
        self.chat_box.param.watch(self._chat, "value")

    # decorator to disable the chat box while the AI is thinking
    def _disable_inputs(func):
        def inner(self, *args, **kwargs):
            try:
                self.chat_box.disabled = True
                func(self, *args, **kwargs)
            finally:
                self.chat_box.disabled = False

        return inner

    @_disable_inputs
    def _chat(self, event) -> None:
        """
        Chat with the AI. This method is called whenever the user sends a message.
        """
        # user_messages = [{"You": "Your input"}, {"AI": "A response"}, ...]
        user_message = event.new[-1]
        input = user_message.get("You")
        if input is None:
            return
        self.chat_box.append({"AI": self.chain.predict(input=input)})

    def view(self):
        return self.chat_box.servable()


openai_conversation = OpenAIConversation()
openai_conversation.view()

Or a very stripped down version:

from typing import List, Dict

import panel as pn
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

pn.extension(template="bootstrap")

def chat(user_messages: List[Dict[str, str]]) -> None:
    # user_messages = [{"You": "Your input"}, {"AI": "A response"}, ...]
    user_message = user_messages[-1]
    input = user_message.get("You")
    if input is None:
        return
    chat_box.append({"AI": conversation.predict(input=input)})

llm = OpenAI(temperature=0.5)
conversation = ConversationChain(llm=llm, memory=ConversationBufferMemory())
chat_box = pn.widgets.ChatBox()
pn.bind(chat, user_messages=chat_box, watch=True)
chat_box.servable()

Would be nice to have async streaming of text, but I haven’t figured out how to replace the values in a row… Not sure if it’s a bug.
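
For reference, this is roughly what I mean by replacing values in a row (an untested sketch; token_stream is a hypothetical async iterator of text chunks, not a real API):

# Untested sketch of "replacing the last row": rebuild the value list with the
# final entry swapped out. token_stream is a hypothetical async token source.
async def stream_reply(chat_box: pn.widgets.ChatBox, token_stream) -> None:
    chat_box.append({"AI": ""})  # placeholder row to overwrite
    text = ""
    async for token in token_stream:
        text += token
        # reassign the whole list to swap the last row; this is the part that
        # doesn't update cleanly for me
        chat_box.value = chat_box.value[:-1] + [{"AI": text}]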


[demo GIF]

Once this gets merged and released:

from typing import List, Dict

import param
import panel as pn
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.callbacks.base import BaseCallbackHandler

pn.extension(template="bootstrap")


class StreamHandler(BaseCallbackHandler):
    def __init__(self, container, initial_text="", target_attr="value"):
        self.container = container
        self.text = initial_text
        self.target_attr = target_attr

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.text += token
        self.container.replace(-1, {"AI": [self.text]})


class OpenAIConversation(param.Parameterized):
    openai_key = param.String(
        default=None,
        doc="""
        OpenAI API key; if not set, will try using env var.""",
    )
    temperature = param.Number(
        default=0.5,
        bounds=(0, 1),
        doc="""
        The model temperature; higher values will result in more random outputs.""",
    )

    def __init__(self, **params):
        super().__init__(**params)
        self._llm = OpenAI(
            streaming=True,
            temperature=self.temperature,
            openai_api_key=self.openai_key,
        )
        self._chain = ConversationChain(
            memory=ConversationBufferMemory(), llm=self._llm
        )
        self._spinner = pn.indicators.LoadingSpinner(
            value=True,
            width=18,
            height=18,
        )
        self.chat_box = pn.widgets.ChatBox()
        self.chat_box.param.watch(self._chat, "value")

    # decorator to disable the chat box while the AI is thinking
    def _disable_inputs(func):
        async def inner(self, *args, **kwargs):
            try:
                self.chat_box.disabled = True
                await func(self, *args, **kwargs)
            finally:
                self.chat_box.disabled = False

        return inner

    @_disable_inputs
    async def _chat(self, event) -> None:
        """
        Chat with the AI. This method is called whenever the user sends a message.
        """
        # user_messages = [{"You": "Your input"}, {"AI": "A response"}, ...]
        user_message = event.new[-1]
        input = user_message.get("You")
        if input is None:
            return
        self.chat_box.append({"AI": self._spinner})
        # need to re-attach the callback handler every time
        # so that it starts with new text every time
        self._llm.callbacks = [StreamHandler(self.chat_box)]
        await self._chain.apredict(input=input)

    def view(self):
        return self.chat_box.servable()


openai_conversation = OpenAIConversation()
openai_conversation.view()

@ahuang11 great example, thanks for sharing! I’ll be keeping an eye on your async/token streaming updates.

One question: have you tried using tools in an Agent with this? I just got it working to stream the final answer, but I’m not sure how to access the intermediate results (which, in some cases, I’ll want to add to the ChatBox content).

Earlier I was able to get the intermediate results as text using @Marc’s previous example, but they came all at once, with no token streaming.


I haven’t tested extensively, but I think it’s a matter of updating the StreamHandler.
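
Something along these lines (an untested sketch; the hook names come from langchain’s BaseCallbackHandler, and it reuses the replace API from the PR above) should let you surface intermediate tool results in the ChatBox while still streaming the final answer:

from typing import Any

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction


class AgentStreamHandler(BaseCallbackHandler):
    """Stream the LLM output and surface intermediate agent steps in a ChatBox."""

    def __init__(self, chat_box):
        self.chat_box = chat_box
        self.text = ""

    def on_llm_start(self, serialized, prompts, **kwargs) -> None:
        # an agent may call the LLM several times, so start a fresh row each time
        self.text = ""
        self.chat_box.append({"AI": ""})

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # stream into the row created by on_llm_start (same replace API as above)
        self.text += token
        self.chat_box.replace(-1, {"AI": [self.text]})

    def on_agent_action(self, action: AgentAction, **kwargs) -> Any:
        # show which tool the agent decided to call and with what input
        self.chat_box.append({"AI": f"Calling {action.tool} with: {action.tool_input}"})

    def on_tool_end(self, output: str, **kwargs) -> Any:
        # surface the intermediate tool result before the next LLM call
        self.chat_box.append({"AI": f"Tool result: {output}"})

You’d attach it the same way as the StreamHandler above, e.g. by setting llm.callbacks = [AgentStreamHandler(chat_box)] before running the agent, or by passing callbacks=[...] to agent.run.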


I have a PR to make this a lot simpler here.

It’s still a draft, and I will try applying it to practical use cases, including LangChain agents, plotting, and evaluating code. I’d also like to invite anyone to provide feedback!


Example use case (rough sketch below):

  1. the user uploads a CSV file
  2. the app internally extracts the column names
  3. OpenAI is asked to write some hvplot code for a plot
  4. the plot gets shown and is interactive
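
Here is a minimal, hypothetical sketch of that flow; the prompt wording, the eval of model output, and the widget layout are all illustrative rather than an actual implementation:

# Hypothetical sketch of the CSV -> hvplot flow; the prompt text and the eval()
# step are illustrative only (never eval untrusted model output in production).
import io

import panel as pn
import pandas as pd
import hvplot.pandas  # noqa: registers the DataFrame .hvplot accessor
from langchain.llms import OpenAI

pn.extension()

llm = OpenAI(temperature=0)
file_input = pn.widgets.FileInput(accept=".csv")
plot_pane = pn.pane.HoloViews()


def plot_csv(event):
    # read the uploaded CSV and extract the column names
    df = pd.read_csv(io.BytesIO(event.new))
    prompt = (
        "Write a single Python expression that plots a pandas DataFrame `df` "
        f"with columns {list(df.columns)} using the df.hvplot accessor. "
        "Return only the expression, with no explanation."
    )
    # ask OpenAI for the plotting code, then render the interactive hvplot
    code = llm.predict(prompt).strip()
    plot_pane.object = eval(code, {"df": df})


file_input.param.watch(plot_csv, "value")
pn.Column(file_input, plot_pane).servable()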

With the upcoming Chat Components PR, it’s possible to use LangChain agents pretty effortlessly:

import panel as pn
from langchain.agents import initialize_agent, AgentType, load_tools
from langchain.llms import OpenAI

pn.extension()

def callback(contents, name, chat_interface):
    agent.run(contents, callbacks=[PanelCallbackHandler(chat_interface=chat_interface)])
    yield system_entry.clone(value="That was fun, ask me more!")


system_entry = pn.widgets.ChatEntry(user="System", avatar="⚙️")
chat_interface = pn.widgets.ChatInterface(
    value=[system_entry.clone(value="Let's do math!")],
    callback=callback,
)
llm = OpenAI(streaming=True)
tools = load_tools(["pal-math"], llm=llm)
agent = initialize_agent(tools, llm)
pn.template.FastListTemplate(
    main=[chat_interface],
    title="MathGPT"
).servable()

The following will need to be merged into LangChain

from typing import Any, Dict, Union

import panel as pn
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult


class PanelCallbackHandler(BaseCallbackHandler):
    def __init__(
        self,
        chat_interface: pn.widgets.ChatInterface,
        user: str = "LangChain",
        avatar: str = "🦜️",
    ):
        self.chat_interface = chat_interface
        self._entry = None
        self._active_user = user
        self._active_avatar = avatar
        self._disabled_state = self.chat_interface.disabled

        self._input_user = user
        self._input_avatar = avatar

    def on_llm_start(self, serialized: Dict[str, Any], *args, **kwargs):
        model = kwargs.get("invocation_params", {}).get("model_name", "")
        if self._active_user and model not in self._active_user:
            self._active_user = f"{self._active_user} ({model})"
        return super().on_llm_start(serialized, *args, **kwargs)

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self._entry = self.chat_interface.stream(
            token.replace("\n", "<br>"),
            user=self._active_user,
            avatar=self._active_avatar,
            entry=self._entry,
        )
        return super().on_llm_new_token(token, **kwargs)

    def on_llm_end(self, response: LLMResult, *args, **kwargs):
        return super().on_llm_end(response, *args, **kwargs)

    def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs):
        return super().on_llm_error(error, *args, **kwargs)

    def on_agent_action(self, action: AgentAction, *args, **kwargs: Any) -> Any:
        return super().on_agent_action(action, *args, **kwargs)

    def on_agent_finish(self, finish: AgentFinish, *args, **kwargs: Any) -> Any:
        return super().on_agent_finish(finish, *args, **kwargs)

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, *args, **kwargs
    ):
        self._active_avatar = "🛠️"
        self._active_user = f"{self._active_user} - {serialized['name']}"
        return super().on_tool_start(serialized, input_str, *args, **kwargs)

    def on_tool_end(self, output, *args, **kwargs):
        self._active_user = self._input_user
        self._active_avatar = self._input_avatar
        return super().on_tool_end(output, *args, **kwargs)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], *args, **kwargs
    ):
        return super().on_tool_error(error, *args, **kwargs)

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], *args, **kwargs
    ):
        self.chat_interface.disabled = True
        return super().on_chain_start(serialized, inputs, *args, **kwargs)

    def on_chain_end(self, outputs: Dict[str, Any], *args, **kwargs):
        self._entry = None
        self.chat_interface.disabled = self._disabled_state
        return super().on_chain_end(outputs, *args, **kwargs)

If we can’t get it into LangChain, I guess we can have it in Panel?

We could initially have it in Panel and then deprecate it once it’s in LangChain, but we’ll first see how fast or slow it is to get a PR into LangChain.
