I'm following LlamaIndex's documentation, which seems to be out of date; that seems true of just about everything in the LLM space. I've checked OpenAI's documentation as well and can't find the right API for this. Maybe I'm missing something?
When I run main.py, this is the output:
Hello! How can I assist you today?
Traceback (most recent call last):
  File "/Users/me/Documents/openai-agent/main.py", line 80, in <module>
    print(agent.chat("What is 2123 * 215123"))
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/me/Documents/openai-agent/main.py", line 54, in chat
    function_message = self._call_function(tool_call)
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/me/Documents/openai-agent/main.py", line 62, in _call_function
    id_ = tool_call["id"]
          ~~~~~~~~~^^^^^^
TypeError: 'ChatCompletionMessageToolCall' object is not subscriptable
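From the traceback, the crash is in `_call_function`: `tool_call` isn't a dict but a `ChatCompletionMessageToolCall`, which (assuming the v1 openai-python SDK) is a Pydantic model, so indexing it fails while attribute access should work:

    tool_call["id"]  # TypeError: 'ChatCompletionMessageToolCall' object is not subscriptable
    tool_call.id     # Pydantic models expose their fields as attributes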
Here's the code:
from typing import Sequence, List
import json

from dotenv import load_dotenv
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio

nest_asyncio.apply()
load_dotenv()


def multiply(a: int, b: int) -> int:
    """Multiplies two integers and returns the result integer"""
    return a * b


multiply_tool = FunctionTool.from_defaults(fn=multiply)


def add(a: int, b: int) -> int:
    """Adds two integers and returns the result integer"""
    return a + b


add_tool = FunctionTool.from_defaults(fn=add)


class MyOpenAIAgent:
    def __init__(
        self,
        tools: Sequence[BaseTool] = [],
        llm: OpenAI = OpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
        chat_history: List[ChatMessage] = [],
    ) -> None:
        self._llm = llm
        self._tools = {tool.metadata.name: tool for tool in tools}
        self._chat_history = chat_history

    def reset(self) -> None:
        self._chat_history = []

    def chat(self, message: str) -> str:
        chat_history = self._chat_history
        chat_history.append(ChatMessage(role="user", content=message))
        tools = [
            tool.metadata.to_openai_tool() for _, tool in self._tools.items()
        ]
        ai_message = self._llm.chat(chat_history, tools=tools).message
        additional_kwargs = ai_message.additional_kwargs
        chat_history.append(ai_message)
        tool_calls = ai_message.additional_kwargs.get("tool_calls", None)
        # parallel function calling is now supported
        if tool_calls is not None:
            for tool_call in tool_calls:
                function_message = self._call_function(tool_call)
                chat_history.append(function_message)
            ai_message = self._llm.chat(chat_history).message
            chat_history.append(ai_message)
        return ai_message.content

    def _call_function(self, tool_call: dict) -> ChatMessage:
        id_ = tool_call["id"]
        function_call = tool_call["function"]
        tool = self._tools[function_call["name"]]
        output = tool(**json.loads(function_call["arguments"]))
        return ChatMessage(
            name=function_call["name"],
            content=str(output),
            role="tool",
            additional_kwargs={
                "tool_call_id": id_,
                "name": function_call["name"],
            },
        )


if __name__ == "__main__":
    agent = MyOpenAIAgent(tools=[multiply_tool, add_tool])
    print(agent.chat("Hi"))
    print(agent.chat("What is 2123 * 215123"))
UPDATE
Here's the updated working code as of 3.26.1991:
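The only change needed was in `_call_function`: with openai-python v1, each entry in `additional_kwargs["tool_calls"]` is a `ChatCompletionMessageToolCall` object, so its fields have to be read as attributes instead of dict keys. A minimal sketch of the corrected method, assuming that v1 object model (everything else in the class is unchanged):

# new import at the top of main.py
from openai.types.chat import ChatCompletionMessageToolCall


class MyOpenAIAgent:
    # ... __init__, reset, and chat are unchanged ...

    def _call_function(
        self, tool_call: ChatCompletionMessageToolCall
    ) -> ChatMessage:
        # The v1 SDK returns Pydantic models: use attributes, not subscripts.
        id_ = tool_call.id
        function_call = tool_call.function
        tool = self._tools[function_call.name]
        output = tool(**json.loads(function_call.arguments))
        return ChatMessage(
            name=function_call.name,
            content=str(output),
            role="tool",
            additional_kwargs={
                "tool_call_id": id_,
                "name": function_call.name,
            },
        )

With that change, the second call goes through the multiply tool and the final print reports 2123 * 215123 = 456706129.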