-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathmain.py
executable file
·106 lines (91 loc) · 4.56 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
"""Streamlit app: chat with a research paper PDF, or run a CrewAI
literature-survey pipeline for a research topic.

Requires a GROQ API key (entered in the UI) plus OPENAI_MODEL_NAME and
EMBEDDING_MODEL_NAME in the environment / .env file.
"""
import os
import warnings

import requests
import streamlit as st
from dotenv import load_dotenv
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.groq import Groq
from crewai import Crew, Process

from agents import *
from tools import *
from tasks import *

warnings.filterwarnings("ignore")
load_dotenv()

st.title("Literature Survey Made Easy!")

GROQ_API_KEY = st.text_input("Enter your GROQ API key: ", type="password")
# NOTE(review): the LLM model name is read from OPENAI_MODEL_NAME even though
# the client is Groq — presumably a CrewAI convention; confirm in .env.
LLM_MODEL_NAME = os.environ.get("OPENAI_MODEL_NAME")
EMBEDDING_MODEL_NAME = os.environ.get("EMBEDDING_MODEL_NAME")


def _interact_with_paper(embed_model, llm):
    """Download a PDF from a user-supplied URL, index it, and answer
    a question about it with a retrieval query engine.

    Errors (bad URL, unreadable PDF, query failure) are shown in the UI.
    """
    pdf_url = st.text_input("Enter PDF url")
    if not pdf_url:
        st.write("Please provide the url to download the PDF.")
        return
    try:
        # Bounded download: fail fast on unreachable hosts, and reject
        # non-2xx responses instead of silently indexing an error page.
        response = requests.get(pdf_url, timeout=60)
        response.raise_for_status()
        save_path = "paper.pdf"
        with open(save_path, 'wb') as file:
            file.write(response.content)

        documents = SimpleDirectoryReader(input_files=[save_path]).load_data()

        # Split into overlapping chunks and index the resulting nodes.
        # BUG FIX: the original passed the node list as `node_parser=` to
        # from_documents, so the pre-split nodes were silently ignored;
        # indexing the nodes directly actually uses the SentenceSplitter.
        text_splitter = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
        nodes = text_splitter.get_nodes_from_documents(documents, show_progress=True)
        index = VectorStoreIndex(nodes, show_progress=True, embed_model=embed_model)
        query_engine = index.as_query_engine(similarity_top_k=5, llm=llm)

        question = st.text_input("Ask question", key="question")
        if st.button("Submit", key="5"):
            if question:
                st.write("Searching.......")
                response = query_engine.query(question)
                st.write(response.response)
            else:
                st.write("Ask question")
    except Exception as e:
        # BUG FIX: report in the Streamlit UI (the original used print(),
        # which only reaches the server console).
        st.error(f"Error: {e}")


def _research_topic(embed_model, llm):
    """Run the hierarchical CrewAI research pipeline for a user topic:
    web research + arXiv research, then analysis, under a manager agent.
    """
    user_input = st.text_input("Which research topic you are interested in?")
    if st.button("Submit", key="3"):
        if not user_input:
            st.write("Please enter research topic")
            return
        st.write("please have patience, Our Crew is at work :)")

        # Project-local factories (tools.py / agents.py / tasks.py).
        tools = Tools(user_input)
        serper_dev_tool = tools.serper_dev_tool()
        scrape_website_tool = tools.scrape_website_tool()
        arxiv_query_tool = tools.arxiv_query_tool(embed_model, llm)

        agents = Agents(user_input)
        web_researcher = agents.web_researcher(serper_dev_tool, scrape_website_tool)
        arxiv_researcher = agents.arxiv_researcher(arxiv_query_tool)
        analyst = agents.analyst()
        manager = agents.manager()

        tasks = Tasks(user_input)
        web_researcher_task = tasks.web_researcher_task(web_researcher)
        arxiv_researcher_task = tasks.arxiv_researcher_task(arxiv_researcher)
        analyst_task = tasks.analyst_task(analyst)

        crew = Crew(
            agents=[web_researcher, arxiv_researcher, analyst],
            tasks=[web_researcher_task, arxiv_researcher_task, analyst_task],
            manager_agent=manager,  # hierarchical process requires a manager
            process=Process.hierarchical,
            verbose=True,
        )
        result = crew.kickoff()
        st.write("AI response: ")
        st.write(result.raw)


if GROQ_API_KEY:
    # BUG FIX: build the models only once a key exists — the original
    # constructed the Groq client unconditionally with an empty key.
    embed_model = HuggingFaceEmbedding(model_name=EMBEDDING_MODEL_NAME)
    llm = Groq(model=LLM_MODEL_NAME, api_key=GROQ_API_KEY)
    try:
        choice = st.radio(
            "Select the task you are interested in",
            ["Interact with a Research Paper", "Research about a research topic"],
        )
        if choice == "Interact with a Research Paper":
            _interact_with_paper(embed_model, llm)
        else:
            _research_topic(embed_model, llm)
    except Exception as e:
        st.error(f"Error: {e}")
else:
    # BUG FIX: the app takes a GROQ key; the original prompt said "OpenAI".
    st.write("Please provide your GROQ API key")