progs2002 committed on
Commit
c41f655
·
1 Parent(s): 4417d96

updated README.md, changed slider default values

Browse files
Files changed (3) hide show
  1. README.md +5 -0
  2. app.py +4 -4
  3. model.py +1 -1
README.md CHANGED
@@ -7,7 +7,12 @@ sdk: streamlit
7
  sdk_version: 1.30.0
8
  app_file: app.py
9
  pinned: false
 
10
  license: mit
 
 
 
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
7
  sdk_version: 1.30.0
8
  app_file: app.py
9
  pinned: false
10
+ models: progs2002/star-trek-tng-script-generator
11
  license: mit
12
+
13
+ preload_from_hub:
14
+ - progs2002/star-trek-tng-script-generator/model.safetensors
15
+ - progs2002/star-trek-tng-script-generator/tokenizer.json
16
  ---
17
 
18
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -34,8 +34,8 @@ import streamlit_scrollable_textbox as stx
34
 
35
  st.text(ascii_art)
36
 
37
- with st.spinner("Please wait... loading model"):
38
- llm = LLM()
39
 
40
  demo_text = """DATA: The ship has gone into warp, sir.
41
  RIKER: Who gave the command?
@@ -48,11 +48,11 @@ col1, col2 = st.columns(2)
48
 
49
  with col1:
50
  temp = st.slider('Temperature', 0.0, 1.0, 1.0, 0.1)
51
- max_len = st.number_input('Max length', min_value=1, max_value=2048, value=512)
52
 
53
  with col2:
54
  top_p = st.slider('p', 0.0, 1.0, 0.95, 0.1)
55
- top_k = st.slider('k', 1, 100, 50, 5)
56
 
57
  if st.button("Generate"):
58
  with st.spinner("Generating text..."):
 
34
 
35
  st.text(ascii_art)
36
 
37
+ # with st.spinner("Please wait... loading model"):
38
+ llm = LLM()
39
 
40
  demo_text = """DATA: The ship has gone into warp, sir.
41
  RIKER: Who gave the command?
 
48
 
49
  with col1:
50
  temp = st.slider('Temperature', 0.0, 1.0, 1.0, 0.1)
51
+ max_len = st.number_input('Max new tokens', min_value=1, max_value=2048, value=128)
52
 
53
  with col2:
54
  top_p = st.slider('p', 0.0, 1.0, 0.95, 0.1)
55
+ top_k = st.slider('k', 1, 100, 1, 5)
56
 
57
  if st.button("Generate"):
58
  with st.spinner("Generating text..."):
model.py CHANGED
@@ -10,7 +10,7 @@ class LLM:
10
 
11
  output_tokens = self.model.generate(
12
  input_ids = encoded_prompt,
13
- max_length = max_len,
14
  do_sample=True,
15
  num_return_sequences=1,
16
  pad_token_id=self.model.config.eos_token_id,
 
10
 
11
  output_tokens = self.model.generate(
12
  input_ids = encoded_prompt,
13
+ max_new_tokens = max_len,
14
  do_sample=True,
15
  num_return_sequences=1,
16
  pad_token_id=self.model.config.eos_token_id,