I am new to prompt engineering. Recently I have been running into a problem with LangChain's PromptTemplate. I'm working in a Jupyter notebook, and when I run the code below it raises a ValidationError. Here is my code:
# Embeddings via the Gemini embedding model
from langchain_google_genai import GoogleGenerativeAIEmbeddings
google_generative_ai_Embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=api_key)

# Store the documents in Chroma and expose it as a retriever
from langchain_community.vectorstores import Chroma
vectordb = Chroma.from_documents(data,
                                 embedding=google_generative_ai_Embeddings,
                                 persist_directory='./chromadb')
retriever_google = vectordb.as_retriever(score_threshold=0.7)
from langchain.prompts import PromptTemplate
prompt_template = """Given the following context and a question, generate an answer based on this context only.
In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
If the answer is not found in the context, kindly state "I don't know." Don't try to make up an answer.
CONTEXT: {context}
QUESTION: {question}"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
from langchain.chains import RetrievalQA
chain_type = "stuff"
chain = RetrievalQA.from_chain_type(llm=llm,
                                    chain_type=chain_type,
                                    retriever=retriever_google,
                                    input_key="query",
                                    return_source_documents=True,
                                    chain_type_kwargs=chain_type_kwargs)
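The one object not shown here is llm, which is defined earlier in the notebook by the tutorial code. My understanding is that it should be a concrete chat model instance, roughly like the sketch below (this is only my assumption of what the tutorial intended; the class and model name here may not match what my notebook actually has):

# My guess at what the tutorial intended for `llm` -- not necessarily what my notebook contains
from langchain_google_genai import ChatGoogleGenerativeAI
llm = ChatGoogleGenerativeAI(model="gemini-pro",   # hypothetical model name
                             google_api_key=api_key,
                             temperature=0.1)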
When I run this cell, I receive the following error:
ValidationError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_4328\826232400.py in <module>
20 chain_type = "stuff"
21
---> 22 chain = RetrievalQA.from_chain_type(llm=llm,
23 chain_type=chain_type,
24 retriever=retriever_google,
~\AppData\Roaming\Python\Python39\site-packages\langchain\chains\retrieval_qa\base.py in from_chain_type(cls, llm, chain_type, chain_type_kwargs, **kwargs)
98 """Load chain from chain type."""
99 _chain_type_kwargs = chain_type_kwargs or {}
--> 100 combine_documents_chain = load_qa_chain(
101 llm, chain_type=chain_type, **_chain_type_kwargs
102 )
~\AppData\Roaming\Python\Python39\site-packages\langchain\chains\question_answering\__init__.py in load_qa_chain(llm, chain_type, verbose, callback_manager, **kwargs)
247 f"Should be one of {loader_mapping.keys()}"
248 )
--> 249 return loader_mapping[chain_type](
250 llm, verbose=verbose, callback_manager=callback_manager, **kwargs
251 )
~\AppData\Roaming\Python\Python39\site-packages\langchain\chains\question_answering\__init__.py in _load_stuff_chain(llm, prompt, document_variable_name, verbose, callback_manager, callbacks, **kwargs)
71 ) -> StuffDocumentsChain:
72 _prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
---> 73 llm_chain = LLMChain(
74 llm=llm,
75 prompt=_prompt,
~\AppData\Roaming\Python\Python39\site-packages\langchain\load\serializable.py in __init__(self, **kwargs)
73
74 def __init__(self, **kwargs: Any) -> None:
---> 75 super().__init__(**kwargs)
76 self._lc_kwargs = kwargs
77
~\AppData\Roaming\Python\Python39\site-packages\pydantic\v1\main.py in __init__(__pydantic_self__, **data)
339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)
340 if validation_error:
--> 341 raise validation_error
342 try:
343 object_setattr(__pydantic_self__, '__dict__', values)
ValidationError: 1 validation error for LLMChain
llm
Can't instantiate abstract class BaseLanguageModel with abstract methods agenerate_prompt, apredict, apredict_messages, generate_prompt, invoke, predict, predict_messages (type=type_error)
I got this code from an online tutorial, and I don't understand how to fix this issue. From the traceback it looks like LLMChain is rejecting the llm argument, but I'm not sure why. Please help me solve this.