Advertisement
sandipto729

Untitled

Jun 12th, 2025
884
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 2.78 KB | Source Code | 0 0
  1. from flask import Flask, request, jsonify
  2. from flask_cors import CORS
  3. from dotenv import load_dotenv
  4. import os
  5.  
  6. from langchain.prompts import PromptTemplate
  7. from langchain.memory import ConversationBufferWindowMemory
  8. from langchain_community.embeddings import HuggingFaceEmbeddings
  9. from langchain_community.vectorstores import Chroma
  10. from langchain.chains import RetrievalQA
  11. from langchain_groq import ChatGroq
  12.  
  13.  
  14.  
# Load environment variables from .env; the Groq key is mandatory for the LLM,
# FRONTEND_URL restricts which origin the browser may call this API from.
load_dotenv()
groq_api_key = os.getenv("GROQ_API_KEY")
FRONTEND_URL = os.getenv("FRONTEND_URL")
if not groq_api_key:
    raise ValueError("GROQ_API_KEY is not set in your .env file.")
# Also export it so libraries that read the env var directly can find it.
os.environ["GROQ_API_KEY"] = groq_api_key

# Flask setup: allow credentialed cross-origin requests from the frontend only.
# NOTE(review): if FRONTEND_URL is unset, origins becomes None — confirm the
# intended CORS behavior in that case.
app = Flask(__name__)
CORS(app, supports_credentials=True, resources={r"/*": {"origins": FRONTEND_URL}})

# Load existing vectorstore (Chroma) from disk; assumes ./chroma_doc_db was
# populated beforehand with the same embedding model, or retrieval returns nothing.
embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = Chroma(persist_directory="./chroma_doc_db", embedding_function=embedding)
  30.  
  31. @app.route('/')
  32. def index():
  33.     return "Backend is running!"
  34.  
  35. @app.route('/api/process-url', methods=['POST'])
  36. def process_url():
  37.     data = request.json
  38.     query = data.get('query')
  39.     if not query:
  40.         return jsonify({"error": "Query is required"}), 400
  41.  
  42.     try:
  43.         # Initialize LLM
  44.         model = ChatGroq(
  45.             model_name="llama3-8b-8192",
  46.             api_key=groq_api_key,
  47.             temperature=0.6,
  48.             max_tokens=512
  49.         )
  50.  
  51.  
  52.         # Prompt template
  53.         prompt_template = PromptTemplate(
  54.             input_variables=["context", "question"],
  55.             template="""
  56.            You are a helpful assistant that provides concise answers based on the provided documents.
  57.  
  58.            Context: {context}
  59.  
  60.            Question: {question}
  61.  
  62.            Answer:"""
  63.         )
  64.  
  65.         # Memory
  66.         memory = ConversationBufferWindowMemory(
  67.             memory_key="chat_history",
  68.             return_messages=True,
  69.             k=5,
  70.             output_key="result"
  71.         )
  72.  
  73.         # Retriever and QA chain
  74.         retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
  75.         qa = RetrievalQA.from_chain_type(
  76.             llm=model,
  77.             chain_type="stuff",
  78.             retriever=retriever,
  79.             return_source_documents=True,
  80.             memory=memory,
  81.             chain_type_kwargs={"prompt": prompt_template},
  82.             output_key="result"
  83.         )
  84.  
  85.         # Run query
  86.         result = qa({"query": query})
  87.        
  88.         return jsonify({
  89.             "answer": result["result"],
  90.         })
  91.  
  92.     except Exception as e:
  93.         return jsonify({"error": str(e)}), 500
  94.  
  95. if __name__ == "__main__":
  96.     port = int(os.environ.get("PORT", 8000))
  97.     app.run(host="0.0.0.0", port=port)
  98.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement