Maximofn committed on
Commit
543aa6c
·
1 Parent(s): 4aaa328

Switch to smaller SmolLM2 model variant

Browse files

- Change model from SmolLM2-1.7B-Instruct to SmolLM2-135M-Instruct
- Update comments to improve clarity and readability
- Maintain existing cache directory and authentication configuration

Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -10,7 +10,7 @@ import os
10
  from dotenv import load_dotenv
11
  load_dotenv()
12
 
13
- # Configurar directorio de caché en un lugar con permisos
14
  os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
15
  os.environ["HF_HOME"] = "/tmp/hf_home"
16
 
@@ -19,20 +19,20 @@ HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN", os.getenv("HUGGINGFACE_T
19
  print(f"Token HuggingFace: {HUGGINGFACE_TOKEN}")
20
 
21
  # Model to use
22
- MODEL_NAME = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
23
 
24
  # Initialize the model and tokenizer locally with authentication
25
  print(f"Loading model {MODEL_NAME} locally...")
26
  tokenizer = AutoTokenizer.from_pretrained(
27
  MODEL_NAME,
28
  token=HUGGINGFACE_TOKEN, # Add token for authentication
29
- cache_dir="/tmp/transformers_cache" # Especificar directorio de caché
30
  )
31
  model = AutoModelForCausalLM.from_pretrained(
32
  MODEL_NAME,
33
  device_map="auto",
34
  token=HUGGINGFACE_TOKEN, # Add token for authentication
35
- cache_dir="/tmp/transformers_cache" # Especificar directorio de caché
36
  )
37
 
38
  # Create a pipeline to facilitate generation
 
10
  from dotenv import load_dotenv
11
  load_dotenv()
12
 
13
+ # Configure cache directory
14
  os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
15
  os.environ["HF_HOME"] = "/tmp/hf_home"
16
 
 
19
  print(f"Token HuggingFace: {HUGGINGFACE_TOKEN}")
20
 
21
  # Model to use
22
+ MODEL_NAME = "HuggingFaceTB/SmolLM2-135M-Instruct"
23
 
24
  # Initialize the model and tokenizer locally with authentication
25
  print(f"Loading model {MODEL_NAME} locally...")
26
  tokenizer = AutoTokenizer.from_pretrained(
27
  MODEL_NAME,
28
  token=HUGGINGFACE_TOKEN, # Add token for authentication
29
+ cache_dir="/tmp/transformers_cache" # Specify cache directory
30
  )
31
  model = AutoModelForCausalLM.from_pretrained(
32
  MODEL_NAME,
33
  device_map="auto",
34
  token=HUGGINGFACE_TOKEN, # Add token for authentication
35
+ cache_dir="/tmp/transformers_cache" # Specify cache directory
36
  )
37
 
38
  # Create a pipeline to facilitate generation