naveen07garg committed on
Commit
f0f41ed
·
verified ·
1 Parent(s): 3abe4ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -483,7 +483,7 @@ def generate_rag_response(user_input, retriever, k=3, max_tokens=1500):
483
  ]
484
 
485
  #llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, max_tokens=max_tokens)
486
- #response = llm.invoke(messages)
487
  #return {"answer": response.content, "sources": [d.metadata for d in relevant_docs]}
488
  # You still used ChatOpenAI (from langchain-openai) for generating answers.
489
  # That’s where the proxies keyword issue blew up, since that part was still using the buggy client.
 
483
  ]
484
 
485
  #llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, max_tokens=max_tokens)
486
+ #response = llm.invoke(messages)
487
  #return {"answer": response.content, "sources": [d.metadata for d in relevant_docs]}
488
  # You still used ChatOpenAI (from langchain-openai) for generating answers.
489
  # That’s where the proxies keyword issue blew up, since that part was still using the buggy client.