Spaces:
Runtime error
Add LoRA merging
app.py CHANGED
@@ -35,8 +35,13 @@ MODEL_PATHS = {
     "sarcasm_less": "hallisky/lora-sarcasm-less-llama-3-8b",
     "voice_passive": "hallisky/lora-voice-passive-llama-3-8b",
     "voice_active": "hallisky/lora-voice-active-llama-3-8b",
+    "type_persuasive": "hallisky/lora-type-persuasive-llama-3-8b",
+    "type_expository": "hallisky/lora-type-expository-llama-3-8b",
+    "type_narrative": "hallisky/lora-type-narrative-llama-3-8b",
+    "type_descriptive": "hallisky/lora-type-descriptive-llama-3-8b",
 }
 FIRST_MODEL = list(MODEL_PATHS.keys())[5]
+MAX_NEW_TOKENS=1024
 
 DESCRIPTION = """\
 # Authorship Obfuscation
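For context, MODEL_PATHS maps each style-control key to a LoRA adapter repository on the Hugging Face Hub; this hunk adds four writing-type adapters (persuasive, expository, narrative, descriptive) and a MAX_NEW_TOKENS cap used later during generation. The snippet below is only a minimal sketch of how adapters like these could be attached to one shared base model with PEFT so they can later be merged by name. The base checkpoint name and the loading code are assumptions for illustration; the Space's actual model-loading code is outside this diff.

# Illustrative sketch only; uses MODEL_PATHS and FIRST_MODEL defined above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_MODEL = "meta-llama/Meta-Llama-3-8B"  # assumed base for the hallisky LoRA adapters

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
base = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL, torch_dtype=torch.bfloat16, device_map="auto"
)

# Wrap the base model with one adapter, then register the rest under their
# MODEL_PATHS keys so they can be referenced by name when merging.
model = PeftModel.from_pretrained(base, MODEL_PATHS[FIRST_MODEL], adapter_name=FIRST_MODEL)
for name, path in MODEL_PATHS.items():
    if name != FIRST_MODEL:
        model.load_adapter(path, adapter_name=name)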
@@ -113,49 +118,61 @@ def save_feedback(feedback_rating, feedback_text, latest_obfuscation):
     return "No Feedback Selected", ""
 
 @spaces.GPU
-def greet(input_text, length, function_words, grade_level,
+def greet(input_text, length, function_words, grade_level, formality, sarcasm, voice, persuasive, descriptive, narrative, expository):
     global latest_obfuscation, user_id
     current_time = datetime.now().isoformat()
 
     sliders_dict = {}
     cur_keys = []
-    cur_keys.append(("length_more" if length > 0 else (None if length == 0 else "length_less"), length))
-    cur_keys.append(("function_more" if function_words > 0 else (None if function_words == 0 else "function_less"), function_words))
-    cur_keys.append(("grade_more" if grade_level > 0 else (None if grade_level == 0 else "grade_less"), grade_level))
-    cur_keys.append(("sarcasm_more" if sarcasm > 0 else (None if sarcasm == 0 else "sarcasm_less"), sarcasm))
-    cur_keys.append(("formality_more" if formality > 0 else (None if formality == 0 else "formality_less"), formality))
-    cur_keys.append(("voice_active" if voice > 0 else (None if voice == 0 else "voice_passive"),
+    cur_keys.append(("length_more" if length > 0 else (None if length == 0 else "length_less"), abs(length)))
+    cur_keys.append(("function_more" if function_words > 0 else (None if function_words == 0 else "function_less"), abs(function_words)))
+    cur_keys.append(("grade_more" if grade_level > 0 else (None if grade_level == 0 else "grade_less"), abs(grade_level)))
+    cur_keys.append(("sarcasm_more" if sarcasm > 0 else (None if sarcasm == 0 else "sarcasm_less"), abs(sarcasm)))
+    cur_keys.append(("formality_more" if formality > 0 else (None if formality == 0 else "formality_less"), abs(formality)))
+    cur_keys.append(("voice_active" if voice > 0 else (None if voice == 0 else "voice_passive"), abs(voice)))
+    cur_keys.append(("type_persuasive" if persuasive != 0 else None, abs(persuasive)))
+    cur_keys.append(("type_descriptive" if descriptive != 0 else None, abs(descriptive)))
+    cur_keys.append(("type_narrative" if narrative != 0 else None, abs(narrative)))
+    cur_keys.append(("type_expository" if expository != 0 else None, abs(expository)))
 
     for cur_key in cur_keys:
         if cur_key[0] is not None:
             sliders_dict[cur_key[0]] = cur_key[1]
 
-[26 deleted lines; their contents are not recoverable from this extract]
+    # Make the adapter and switch to it
+    print(sliders_dict)
+
+    if len(sliders_dict) > 0:
+        combo_adapter_name = ""
+        for slider_key in sliders_dict:
+            print(slider_key)
+            print(sliders_dict[slider_key])
+            combo_adapter_name += slider_key + str(int(100*sliders_dict[slider_key])) + "-"
+        combo_adapter_name = combo_adapter_name[:-1]
+        print(combo_adapter_name)
+        print(list(sliders_dict.values()))
+        print(list(sliders_dict.keys()))
+
+        # Add and set the weighted adapter
+        model.add_weighted_adapter(
+            list(sliders_dict.keys()),
+            weights = list(sliders_dict.values()),
+            adapter_name = combo_adapter_name,
+            combination_type = "cat"
+        )
+        model.set_adapter(combo_adapter_name)
+
+        # Convert the list of strings in data to a list of model inputs
+        converted_text = convert_data_to_format(input_text)
+        inputs = tokenizer(converted_text, return_tensors="pt", max_length=2048, truncation=True).to(device)
+        input_length = inputs.input_ids.shape[1]
+        with torch.no_grad():
+            outputs = model.generate(**inputs, max_new_tokens=MAX_NEW_TOKENS, top_p = 0.95)
+        response = tokenizer.decode(outputs[0, input_length:], skip_special_tokens=True).strip()
+    else:
+        response = input_text # If no sliders passed, do not do anything
+
+    # print_nvidia_smi() # Print GPU usage
 
     # Save the new obfuscation result and reset feedback
     latest_obfuscation = {
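The heart of this change is the on-the-fly merge: each slider's sign selects an adapter key, its magnitude (abs value) becomes the weight, the selected adapters are combined with PEFT's add_weighted_adapter using combination_type="cat", and the merged adapter is activated with set_adapter before generation. Below is a stripped-down sketch of that step in isolation; the adapter names and weights are illustrative, not values produced by the app.

# Illustrative values; in the app these come from the UI sliders.
selected = {"length_more": 0.8, "formality_less": 0.5}

# Name the merged adapter after its ingredients, e.g. "length_more80-formality_less50",
# mirroring the naming scheme in greet().
combo_name = "-".join(f"{k}{int(100 * w)}" for k, w in selected.items())

# "cat" concatenates the LoRA matrices of the listed adapters (scaled by their
# weights), so the merged adapter's rank is the sum of the input ranks.
model.add_weighted_adapter(
    adapters=list(selected.keys()),
    weights=list(selected.values()),
    adapter_name=combo_name,
    combination_type="cat",
)
model.set_adapter(combo_name)  # subsequent generate() calls use the merged adapter

PEFT offers other combination_type values (e.g. "linear", "svd") that trade rank growth against extra computation; "cat" is the simplest, since it just stacks the adapters' low-rank factors.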
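Finally, the generation path in greet() formats the input with convert_data_to_format (a helper defined elsewhere in app.py), tokenizes it, and decodes only the tokens the model appends after the prompt. The sketch below isolates that slicing pattern; the prompt text and sampling settings are illustrative, and tokenizer, model, device, and MAX_NEW_TOKENS are the app's globals from earlier in the file. One caveat worth noting: top_p only takes effect when sampling is enabled (do_sample=True, or via the checkpoint's generation config).

# Illustrative prompt; convert_data_to_format is the app's own prompt-formatting helper.
prompt = convert_data_to_format("An example paragraph to rewrite.")
inputs = tokenizer(prompt, return_tensors="pt", max_length=2048, truncation=True).to(device)
prompt_len = inputs.input_ids.shape[1]

with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=MAX_NEW_TOKENS,
        do_sample=True,  # required for top_p to have any effect
        top_p=0.95,
    )

# generate() returns prompt + continuation; slice off the prompt tokens so only
# the rewritten text is decoded.
rewrite = tokenizer.decode(output_ids[0, prompt_len:], skip_special_tokens=True).strip()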