This is the code for the route.ts file in the example repo that uses Vercel KV:
import { kv } from '@vercel/kv'
import { OpenAIStream, StreamingTextResponse } from 'ai'
import { Configuration, OpenAIApi } from 'openai-edge'
import { auth } from '@/auth'
import { nanoid } from '@/lib/utils'

export const runtime = 'edge'

const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY
})

const openai = new OpenAIApi(configuration)

export async function POST(req: Request) {
  const json = await req.json()
  const { messages, previewToken } = json
  const userId = (await auth())?.user.id

  if (!userId) {
    return new Response('Unauthorized', {
      status: 401
    })
  }

  if (previewToken) {
    configuration.apiKey = previewToken
  }

  const res = await openai.createChatCompletion({
    model: 'gpt-3.5-turbo',
    messages,
    temperature: 0.7,
    stream: true
  })

  const stream = OpenAIStream(res, {
    async onCompletion(completion) {
      const title = json.messages[0].content.substring(0, 100)
      const id = json.id ?? nanoid()
      const createdAt = Date.now()
      const path = `/chat/${id}`
      const payload = {
        id,
        title,
        userId,
        createdAt,
        path,
        messages: [
          ...messages,
          {
            content: completion,
            role: 'assistant'
          }
        ]
      }
      await kv.hmset(`chat:${id}`, payload)
      await kv.zadd(`user:chat:${userId}`, {
        score: createdAt,
        member: `chat:${id}`
      })
    }
  })

  return new StreamingTextResponse(stream)
}
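If I'm reading this right, each chat is stored as a hash at chat:{id}, and the sorted set at user:chat:{userId} indexes a user's chats by creation time. So a saved chat could presumably be read back with something like this (my own untested sketch; hgetall and zrange are the @vercel/kv client methods as I understand them):

import { kv } from '@vercel/kv'

// Untested sketch: load one chat, and list a user's chat keys
// newest first. The stored shape mirrors the payload saved above.
export async function getChat(id: string) {
  return await kv.hgetall(`chat:${id}`)
}

export async function getChatKeys(userId: string) {
  // rev: true walks the sorted set from the highest score (newest) down.
  return await kv.zrange(`user:chat:${userId}`, 0, -1, { rev: true })
}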
And here is the code from the Vercel AI SDK example for using LangChain:
import { NextRequest } from 'next/server';
import { Message as VercelChatMessage, StreamingTextResponse } from 'ai';
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { BytesOutputParser } from 'langchain/schema/output_parser';
import { PromptTemplate } from 'langchain/prompts';

export const runtime = 'edge';

/**
 * Basic memory formatter that stringifies and passes
 * message history directly into the model.
 */
const formatMessage = (message: VercelChatMessage) => {
  return `${message.role}: ${message.content}`;
};

const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.

Current conversation:
{chat_history}

User: {input}
AI:`;

/*
 * This handler initializes and calls a simple chain with a prompt,
 * chat model, and output parser. See the docs for more information:
 *
 * https://js.langchain.com/docs/guides/expression_language/cookbook#prompttemplate--llm--outputparser
 */
export async function POST(req: NextRequest) {
  const body = await req.json();
  const messages = body.messages ?? [];
  const formattedPreviousMessages = messages.slice(0, -1).map(formatMessage);
  const currentMessageContent = messages[messages.length - 1].content;

  const prompt = PromptTemplate.fromTemplate(TEMPLATE);

  /**
   * See a full list of supported models at:
   * https://js.langchain.com/docs/modules/model_io/models/
   */
  const model = new ChatOpenAI({
    temperature: 0.8,
  });

  /**
   * Chat models stream message chunks rather than bytes, so this
   * output parser handles serialization and encoding.
   */
  const outputParser = new BytesOutputParser();

  /*
   * Can also initialize as:
   *
   * import { RunnableSequence } from "langchain/schema/runnable";
   * const chain = RunnableSequence.from([prompt, model, outputParser]);
   */
  const chain = prompt.pipe(model).pipe(outputParser);

  const stream = await chain.stream({
    chat_history: formattedPreviousMessages.join('\n'),
    input: currentMessageContent,
  });

  return new StreamingTextResponse(stream);
}
I am struggling to find a way to implement Vercel KV persistence with the LangChain version: the first example hooks into OpenAIStream's onCompletion callback, but the LangChain chain just returns a raw stream. Any ideas on how to combine the two? Your help would be greatly appreciated.
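For what it's worth, here is my current rough idea, completely untested: since chain.stream returns a web ReadableStream of bytes, I could pipe it through a TransformStream that accumulates the chunks and writes to KV in flush(), imitating what onCompletion does in the first snippet. The auth and nanoid imports are the same helpers as in the first example; I haven't verified any of this actually runs on the edge runtime:

import { kv } from '@vercel/kv';
import { NextRequest } from 'next/server';
import { Message as VercelChatMessage, StreamingTextResponse } from 'ai';
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { BytesOutputParser } from 'langchain/schema/output_parser';
import { PromptTemplate } from 'langchain/prompts';
import { auth } from '@/auth';
import { nanoid } from '@/lib/utils';

export const runtime = 'edge';

const formatMessage = (m: VercelChatMessage) => `${m.role}: ${m.content}`;

const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.

Current conversation:
{chat_history}

User: {input}
AI:`;

export async function POST(req: NextRequest) {
  const body = await req.json();
  const messages = body.messages ?? [];

  const userId = (await auth())?.user.id;
  if (!userId) {
    return new Response('Unauthorized', { status: 401 });
  }

  const prompt = PromptTemplate.fromTemplate(TEMPLATE);
  const model = new ChatOpenAI({ temperature: 0.8 });
  const outputParser = new BytesOutputParser();
  const chain = prompt.pipe(model).pipe(outputParser);

  const stream = await chain.stream({
    chat_history: messages.slice(0, -1).map(formatMessage).join('\n'),
    input: messages[messages.length - 1].content,
  });

  // Accumulate the streamed bytes as they pass through to the client,
  // then persist the finished chat to KV in flush(), mirroring the
  // onCompletion callback from the OpenAIStream version.
  let completion = '';
  const decoder = new TextDecoder();
  const persisted = stream.pipeThrough(
    new TransformStream<Uint8Array, Uint8Array>({
      transform(chunk, controller) {
        completion += decoder.decode(chunk, { stream: true });
        controller.enqueue(chunk);
      },
      async flush() {
        const id = body.id ?? nanoid();
        const createdAt = Date.now();
        const payload = {
          id,
          title: messages[0].content.substring(0, 100),
          userId,
          createdAt,
          path: `/chat/${id}`,
          messages: [...messages, { content: completion, role: 'assistant' }]
        };
        await kv.hmset(`chat:${id}`, payload);
        await kv.zadd(`user:chat:${userId}`, {
          score: createdAt,
          member: `chat:${id}`
        });
      },
    })
  );

  return new StreamingTextResponse(persisted);
}

Does this look like a reasonable approach, or is there a supported way to hook into the stream on the LangChain side (something like a completion callback) that I'm missing?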