Giving the same answer for every question for a given context

38 views Asked by At

I am using the Simple Transformers Question & Answer BERT model. I created a custom dataset for the question-answering model and trained the model on it. When I enter a context and a question, the model returns the whole context as the answer instead of the appropriate span. For a given context, it gives me the same answer for different questions. Please help me understand why this happens and how to fix it.

Note: I have split my JSON data of questions and answers into train, test and pred sets. I trained the model using the train data.

I want the model to answer the user's question properly. Also, is there any way to get the answer with this model without the user having to provide the context?

## This is my Code:
# BUG FIX: in the original, the comment "##This is my Code:" and the import
# shared one physical line, so everything after the "#" (including the
# import itself) was commented out and drive.mount raised NameError.
from google.colab import drive

# Mount Google Drive so files can be copied to persistent storage.
drive.mount('/content/drive')

from google.colab import files

# Interactively upload the dataset files into the Colab runtime.
uploaded = files.upload()

import os

# Show the working directory contents (notebook cell output) to confirm
# the uploads landed where expected.
os.listdir()

import shutil

# Example: Copy a file to Google Drive
shutil.copy('pred_data.json', '/content/drive/MyDrive')

import json

# Assuming 'pred_data.json' is the name of your uploaded JSON file
with open('pred_data.json', 'r') as file:
    data = json.load(file)

# Install the Simple Transformers library into the Colab runtime.
# NOTE: "!" is an IPython shell escape -- this line only works inside a
# notebook cell, not in a plain Python script.
!pip install simpletransformers

import json


def _load_json(path):
    """Load and return one JSON document from *path*.

    Consolidates the three copy-pasted open/json.load snippets (each of
    which also re-imported json) into a single helper.
    """
    with open(path, "r") as read_file:
        return json.load(read_file)


# SQuAD-style splits produced from the custom dataset.
train_data = _load_json("train_data.json")
test_data = _load_json("test_data.json")
pred_data = _load_json("pred_data.json")

# Notebook cell outputs to eyeball the loaded splits.
train_data
test_data
pred_data

import logging

from simpletransformers.question_answering import QuestionAnsweringModel, QuestionAnsweringArgs

model_type="bert"
model_name= "bert-base-cased"
if model_type == "bert":
    model_name = "bert-base-cased"

elif model_type == "roberta":
    model_name = "roberta-base"

elif model_type == "distilbert":
    model_name = "distilbert-base-cased"

elif model_type == "distilroberta":
    model_type = "roberta"
    model_name = "distilroberta-base"

elif model_type == "electra-base":
    model_type = "electra"
    model_name = "google/electra-base-discriminator"

elif model_type == "electra-small":
    model_type = "electra"
    model_name = "google/electra-small-discriminator"

elif model_type == "xlnet":
    model_name = "xlnet-base-cased"

### Advanced Methodology
# Fine-tuning hyper-parameters, handed directly to QuestionAnsweringModel.
train_args = dict(
    reprocess_input_data=True,       # rebuild features rather than trust stale caches
    overwrite_output_dir=True,
    use_cached_eval_features=True,
    output_dir=f"outputs/{model_type}",
    best_model_dir=f"outputs/{model_type}/best_model",
    evaluate_during_training=True,
    max_seq_length=128,              # NOTE(review): small for QA -- longer contexts get truncated; confirm
    num_train_epochs=30,
    evaluate_during_training_steps=1000,
    wandb_project="Question Answer Application",
    wandb_kwargs={"name": model_name},
    save_model_every_epoch=False,
    save_eval_checkpoints=False,
    n_best_size=3,                   # keep the top-3 candidate answer spans
    # use_early_stopping=True,
    # early_stopping_metric="mcc",
    # n_gpu=2,
    # manual_seed=4,
    # use_multiprocessing=False,
    train_batch_size=128,
    eval_batch_size=64,
    # config={"output_hidden_states": True},
)

## Initialize the model (use_cuda=False so it also runs on a CPU-only runtime).
model = QuestionAnsweringModel(model_type, model_name, args=train_args, use_cuda=False)

# Fine-tune on the training split, periodically evaluating on the test split.
model.train_model(train_data, eval_data=test_data)

# Final evaluation pass over the held-out test split.
result, texts = model.eval_model(test_data)

# Notebook cell output: show the evaluation metrics.
result

def _best_answer(answers):
    """Return the top-ranked answer string from model.predict output.

    `answers` is the first element of the (answers, probabilities) pair;
    each entry carries an "id" and a ranked "answer" list of candidate
    spans.  Returns "" when the model produced no candidates.
    """
    if not answers:
        return ""
    ranked = answers[0].get("answer") or []
    return ranked[0] if ranked else ""


# User Input
user_context = input("Enter the context: ")
user_question = input("Enter your question: ")

# Create a question-answer scenario based on user input.
# Simple Transformers expects a SQuAD-style list: each item is one context
# plus a "qas" list of question dicts with unique string ids.
user_input = [
    {
        "context": user_context,
        "qas": [
            {
                "question": user_question,
                "id": "0",
            }
        ],
    }
]

# Get answers from the model; predict() returns (answers, probabilities).
answers, probabilities = model.predict(user_input)

# FIX: display the best answer text instead of dumping the raw prediction
# structure, and handle the case where no answer was found.
print("Answer:", _best_answer(answers) or "(no answer found)")
 
0

There are 0 answers