LangChain.js Streaming with OpenAIFunctions Agent

I am building an AdonisJS app with LangChain.js and OpenAI. Everything works except streaming, which is giving me a very hard time. I followed the documentation exactly, but it still doesn't work and I have no idea why. I have an executor provider with methods that set up the memory, prompts, and tools, and that initialize and invoke the executor. The init and invoke methods are below. Please help me.

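For completeness, the imports behind these methods look roughly like this (just a sketch — the exact paths depend on the langchain version; these follow the older single-package layout):

import { AgentExecutor } from "langchain/agents";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { AgentStep, AIMessage, BaseMessage, FunctionMessage } from "langchain/schema";
import { RunnableSequence } from "langchain/schema/runnable";
import { formatToOpenAIFunction } from "langchain/tools/convert_to_openai";
import { OpenAIFunctionsAgentOutputParser } from "langchain/agents/openai/output_parser";
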
// Agent Initializer
public initExecutor() {
    const tools = this.tools

    // The chat model has streaming enabled
    const model = new ChatOpenAI({ modelName: "gpt-4", temperature: 0, streaming: true });

    // Expose the tools to the model as OpenAI function definitions
    const modelWithTools = model.bind({
        functions: tools.map((tool) => formatToOpenAIFunction(tool)),
    });

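    // Convert the intermediate agent steps into messages for the agent_scratchpad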
    const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
        steps.flatMap(({ action, observation }) => {
            if ("messageLog" in action && action.messageLog !== undefined) {
                const log = action.messageLog as BaseMessage[];
                return log.concat(new FunctionMessage(observation, action.tool));
            } else {
                return [new AIMessage(action.log)];
            }
        });

    let runnableAgent;
    // Only bind the OpenAI function definitions when there are tools to call
    if (tools.length !== 0) {
        runnableAgent = RunnableSequence.from([
            {
                input: (i: { input: string; steps: AgentStep[] }) => i.input,
                agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>
                    formatAgentSteps(i.steps),

                // Load memory here
                chat_history: async (_: { input: string; steps: AgentStep[] }) => {
                    const { history } = await this.memory.loadMemoryVariables({});
                    return history;
                },
            },
            this.prompt,
            modelWithTools,
            new OpenAIFunctionsAgentOutputParser(),
            // outputParser,
        ])
    } else {
        runnableAgent = RunnableSequence.from([
            {
                input: (i: { input: string; steps: AgentStep[] }) => i.input,
                agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>
                    formatAgentSteps(i.steps),
                // Load memory here
                chat_history: async (_: { input: string; steps: AgentStep[] }) => {
                    const { history } = await this.memory.loadMemoryVariables({});
                    return history;
                },
            },
            this.prompt,
            model, // no tools, so use the model without bound functions
            new OpenAIFunctionsAgentOutputParser(),
        ]);
    }
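    // Wrap the runnable agent in an executor, which runs the tool-calling loop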
    this.executor = AgentExecutor.fromAgentAndTools({
        agent: runnableAgent,
        tools,
    });
    return this.executor;
}
// Agent Invoke
public async invoke({ input }: { input: string }) {
    try {
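        // Stream the executor run and log each chunk as it arrives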
        const stream = await this.executor.stream({
            input: input,
        });

        for await (const chunk of stream) {
            console.log(JSON.stringify(chunk, null, 2));
        }
        console.log(stream);
        // return stream;
    } catch (error) {
        console.error(error);
        return error;
    }
}

There are 0 answers