How to use LangChain to create my own Dolly chatbot


I have an operation manual PDF file for a website, and I want to use LangChain so that Dolly can answer questions about the website. Below is my code:

from PyPDF2 import PdfReader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from transformers import pipeline
import torch

# Embedding model used to index the PDF chunks
hf_embed = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

# Mount Google Drive and extract the raw text from the manual
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
reader = PdfReader('/content/gdrive/My Drive/data/operation Manual.pdf')
raw_text = ''
for page in reader.pages:
    text = page.extract_text()
    if text:
        raw_text += text
# Split the text into overlapping chunks and index them with FAISS
text_splitter = CharacterTextSplitter(
    separator="\n",
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len,
)
texts = text_splitter.split_text(raw_text)
docsearch = FAISS.from_texts(texts, hf_embed)
# Load Dolly through a Hugging Face pipeline and wrap it for LangChain
model_name = "databricks/dolly-v2-3b"
instruct_pipeline = pipeline(model=model_name, torch_dtype=torch.bfloat16, trust_remote_code=True,
                             device_map="auto", return_full_text=True, max_new_tokens=256,
                             top_p=0.95, top_k=50)
hf_pipe = HuggingFacePipeline(pipeline=instruct_pipeline)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.

{context}

Question: {question}
"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
query = "I forgot my login password."          
docs = docsearch.similarity_search(query)
chain = load_qa_chain(llm = hf_pipe, chain_type="stuff", prompt=PROMPT)
chain({"input_documents": docs, "question": query}, return_only_outputs=True)

I am referring to this article on the Databricks website: https://www.databricks.com/resources/demos/tutorials/data-science-and-ai/build-your-chat-bot-with-dolly?itm_data=demo_center

The output is not the answer that is in the operation manual, and it takes a long time to produce an answer. Where did I go wrong? By the way, when I try the same thing with OpenAI, it works.
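
To narrow down where it goes wrong, a quick sanity check I can run (using only the objects defined above; k=4 is an arbitrary choice) is to print the chunks FAISS retrieves for my query:

# Inspect the chunks the vector store returns for the query.
# If these do not contain the password-reset section of the manual,
# the problem is in the splitting/embedding step, not in Dolly itself.
for i, d in enumerate(docsearch.similarity_search(query, k=4)):
    print(f"--- chunk {i} ---")
    print(d.page_content[:300])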

I want Dolly to respond with a correct answer to my question, without taking so much time.
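
One guess about the slowness (an assumption, not something I have confirmed) is that the 3B model ended up on CPU in Colab. This can be checked with:

# Assumption: slow generation may mean the model landed on CPU
# despite device_map="auto".
import torch
print(torch.cuda.is_available())       # should be True on a GPU runtime
print(instruct_pipeline.model.device)  # where the pipeline actually placed the model

If the model is on a GPU and still slow, another thing I could try (again an assumption) is torch_dtype=torch.float16 instead of bfloat16, since Colab's default T4 GPU has no native bfloat16 support.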
