Update app.py
app.py CHANGED
@@ -483,7 +483,7 @@ def generate_rag_response(user_input, retriever, k=3, max_tokens=1500):
     ]

     #llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, max_tokens=max_tokens)
-    #response = llm.invoke(messages)
+    #response = llm.invoke(messages)
     #return {"answer": response.content, "sources": [d.metadata for d in relevant_docs]}
     # You still used ChatOpenAI (from langchain-openai) for generating answers.
     # That’s where the proxies keyword issue blew up, since that part was still using the buggy client.
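
For context, the proxies issue the comments refer to typically surfaces as "TypeError: Client.__init__() got an unexpected keyword argument 'proxies'" when langchain-openai constructs its underlying OpenAI client against an httpx release that dropped that argument. Below is a minimal sketch of one workaround: calling the openai SDK directly for the generation step instead of ChatOpenAI. The generate_answer helper name is hypothetical, and it assumes messages and relevant_docs are built the same way as in generate_rag_response.

# Sketch of a workaround: use the openai SDK directly for generation
# instead of ChatOpenAI, bypassing the client that raised the proxies error.
# Assumes openai>=1.0 and OPENAI_API_KEY set in the environment;
# `messages` and `relevant_docs` come from the retrieval step above.
from openai import OpenAI

client = OpenAI()

def generate_answer(messages, relevant_docs, max_tokens=1500):
    # Same model and parameters as the commented-out ChatOpenAI call
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        temperature=0,
        max_tokens=max_tokens,
        messages=messages,
    )
    return {
        "answer": response.choices[0].message.content,
        "sources": [d.metadata for d in relevant_docs],
    }

Alternatively, pinning httpx below 0.28 (or upgrading openai / langchain-openai) in the Space's requirements.txt is a common fix; which route applies depends on the rest of the dependency set.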