import os

from rag_opt import init_chat_model, init_embeddings, init_vectorstore
from rag_opt.dataset import TrainDataset
from rag_opt.eval.eval import RAGEvaluator
from rag_opt.rag import RAGWorkflow
# --- Configuration -----------------------------------------------------------
# Read API keys from the environment. The original script referenced
# `OPENAI_API_KEY` / `HUGGINFACE_API_KEY` as bare names that were never
# defined, which raises NameError immediately (note also the HUGGINFACE typo).
# NOTE(review): env-var names chosen by convention — confirm against your
# deployment configuration.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
HUGGINGFACE_API_KEY = os.environ["HUGGINGFACE_API_KEY"]

# --- Initialize RAG components ------------------------------------------------
# Embedding model, shared by the vector store, the RAG workflow, and the
# evaluator below.
embeddings = init_embeddings(
    model="all-MiniLM-L6-v2",
    model_provider="huggingface",
    api_key=HUGGINGFACE_API_KEY,
)

# Chat model — created once. (The original called init_chat_model twice with
# identical arguments and discarded the first client.)
llm = init_chat_model(
    model="gpt-3.5-turbo",
    model_provider="openai",
    api_key=OPENAI_API_KEY,
)

# FAISS-backed vector store using the embedding model above.
vector_store = init_vectorstore(
    provider="faiss",
    embeddings=embeddings,
)

# RAG pipeline with hybrid retrieval returning the top-3 chunks per query.
rag = RAGWorkflow(
    embeddings=embeddings,
    vector_store=vector_store,
    llm=llm,
    retrieval_config={
        "search_type": "hybrid",
        "k": 3,
    },
)

# --- Build the evaluation dataset ---------------------------------------------
# Answer every question in the training set; the resulting dataset is what
# the evaluator scores.
train_dataset = TrainDataset.from_json("./rag_dataset.json")
eval_dataset = rag.get_batch_answers(train_dataset)

# --- Evaluate -----------------------------------------------------------------
evaluator = RAGEvaluator(evaluator_llm=llm, evaluator_embedding=embeddings)
results = evaluator.evaluate(eval_dataset, return_tensor=False)
print(results)