Update app.py
app.py CHANGED
@@ -1,13 +1,36 @@
-import
+import os
+from huggingface_hub import InferenceClient, login
+from transformers import AutoTokenizer
+from langchain.chat_models import ChatOpenAI
 
+# access token with permission to access the model and PRO subscription
+hf_token = "YOUR_HF_TOKEN"  # https://huggingface.co/settings/tokens
+login(token=hf_token)
 
-#
+# tokenizer for generating prompt
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-70b-chat-hf")
 
-
-
-
-
-
-
-
-
+# inference client
+client = InferenceClient("https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf")
+
+# generate function
+def generate(text):
+    payload = tokenizer.apply_chat_template([{"role":"user","content":text}], tokenize=False)
+    res = client.text_generation(
+        payload,
+        do_sample=True,
+        return_full_text=False,
+        max_new_tokens=2048,
+        top_p=0.9,
+        temperature=0.6,
+    )
+    return res.strip()
+
+# test client
+assert generate("What is 2+2?") == "The answer to 2+2 is 4."
+
+# create evaluator
+os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"  # https://platform.openai.com/account/api-keys
+assert os.environ.get("OPENAI_API_KEY") is not None, "Please set OPENAI_API_KEY environment variable"
+
+evaluation_llm = ChatOpenAI(model="gpt-4")
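
As a usage sketch (not part of this commit): the evaluation_llm created at the end of the diff can back a LangChain criteria evaluator to grade answers produced by the generate() client above. The load_evaluator call, the "conciseness" criterion, and the sample question below are illustrative assumptions, not code from this repository.

# usage sketch, not part of this commit: grading the Llama 2 client's output
# with a LangChain criteria evaluator backed by the GPT-4 evaluation_llm above
from langchain.evaluation import load_evaluator

evaluator = load_evaluator("criteria", criteria="conciseness", llm=evaluation_llm)

question = "What is 2+2?"  # sample input, an assumption for illustration
result = evaluator.evaluate_strings(
    prediction=generate(question),  # answer from the inference client above
    input=question,
)
print(result)  # dict with "reasoning", "value" ("Y"/"N"), and "score" (1/0)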
|