progs2002 committed
Commit b94af16 · verified · 1 parent: c46d108

End of training

README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-license: apache-2.0
-base_model: distilgpt2
+license: mit
+base_model: gpt2
 tags:
 - generated_from_trainer
 model-index:
@@ -13,9 +13,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # star-trek-tng-script-generator
 
-This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unknown dataset.
+This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 3.3821
+- Loss: 2.8036
 
 ## Model description
 
@@ -34,31 +34,34 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.001
-- train_batch_size: 16
-- eval_batch_size: 16
+- learning_rate: 0.0002
+- train_batch_size: 1
+- eval_batch_size: 1
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
-- lr_scheduler_warmup_steps: 50
-- num_epochs: 5
+- lr_scheduler_warmup_steps: 100
+- num_epochs: 2
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 3.3834        | 0.4   | 200  | 3.2320          |
-| 3.159         | 0.81  | 400  | 3.1637          |
-| 2.9317        | 1.21  | 600  | 3.1526          |
-| 2.8165        | 1.61  | 800  | 3.1281          |
-| 2.779         | 2.02  | 1000 | 3.1925          |
-| 2.4516        | 2.42  | 1200 | 3.1798          |
-| 2.4697        | 2.82  | 1400 | 3.1530          |
-| 2.2567        | 3.23  | 1600 | 3.2815          |
-| 2.1165        | 3.63  | 1800 | 3.2641          |
-| 2.0832        | 4.03  | 2000 | 3.3755          |
-| 1.8606        | 4.44  | 2200 | 3.3766          |
-| 1.8544        | 4.84  | 2400 | 3.3823          |
+| 3.1852        | 0.13  | 500  | 3.0649          |
+| 3.0477        | 0.26  | 1000 | 3.0007          |
+| 2.9831        | 0.38  | 1500 | 2.9711          |
+| 2.9662        | 0.51  | 2000 | 2.9474          |
+| 2.9275        | 0.64  | 2500 | 2.9116          |
+| 2.8711        | 0.77  | 3000 | 2.8952          |
+| 2.8551        | 0.89  | 3500 | 2.8771          |
+| 2.7449        | 1.02  | 4000 | 2.8645          |
+| 2.4553        | 1.15  | 4500 | 2.8441          |
+| 2.4575        | 1.28  | 5000 | 2.8457          |
+| 2.4452        | 1.4   | 5500 | 2.8329          |
+| 2.4256        | 1.53  | 6000 | 2.8180          |
+| 2.3958        | 1.66  | 6500 | 2.8123          |
+| 2.4084        | 1.79  | 7000 | 2.8049          |
+| 2.3855        | 1.92  | 7500 | 2.8044          |
 
 
 ### Framework versions
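
The new hyperparameters above map one-to-one onto `transformers.TrainingArguments`. As a sketch of how this run could be configured (the `output_dir` name is an assumption; the values and the 500-step eval cadence come from the updated README; Adam betas/epsilon are the Trainer defaults the README lists):

```python
from transformers import TrainingArguments

# Sketch only: output_dir is assumed; values mirror the updated README.
training_args = TrainingArguments(
    output_dir="star-trek-tng-script-generator",  # assumed name
    learning_rate=2e-4,             # was 1e-3
    per_device_train_batch_size=1,  # was 16
    per_device_eval_batch_size=1,   # was 16
    seed=42,
    lr_scheduler_type="cosine",
    warmup_steps=100,               # was 50
    num_train_epochs=2,             # was 5
    evaluation_strategy="steps",
    eval_steps=500,                 # matches the 500-step rows in the results table
)
# Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the Trainer default optimizer.
```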
config.json CHANGED
@@ -1,6 +1,5 @@
 {
-  "_name_or_path": "distilgpt2",
-  "_num_labels": 1,
+  "_name_or_path": "gpt2",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
@@ -10,13 +9,7 @@
   "do_sample": true,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
-  "id2label": {
-    "0": "LABEL_0"
-  },
   "initializer_range": 0.02,
-  "label2id": {
-    "LABEL_0": 0
-  },
   "layer_norm_epsilon": 1e-05,
   "max_length": 50,
   "model_type": "gpt2",
@@ -24,7 +17,7 @@
   "n_embd": 768,
   "n_head": 12,
   "n_inner": null,
-  "n_layer": 6,
+  "n_layer": 12,
   "n_positions": 1024,
   "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:528e65928ef36e0d562ab35926ffe80482409acac4e978430eabec696917ecc7
-size 327657928
+oid sha256:9355cc23b5f74b3b858d4f402c43ad57fd3ec6e56c77a60baf506155c63cb4e3
+size 497774208
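
The ~170 MB growth follows from the base-model swap: GPT-2 small holds roughly 124M parameters against distilgpt2's ~82M, stored here in float32. A rough, hedged sanity check (the safetensors header accounts for the small remainder):

```python
from transformers import AutoModelForCausalLM

# Back-of-the-envelope check, not part of the commit.
model = AutoModelForCausalLM.from_pretrained("gpt2")
n_params = sum(p.numel() for p in model.parameters())
print(n_params)      # ~124M parameters
print(n_params * 4)  # float32 bytes; close to the 497774208 in the LFS pointer
```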
runs/Jan21_06-01-41_0d30c48e12f2/events.out.tfevents.1705816903.0d30c48e12f2.839.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e591b852aaa49635ddc5f91ad1d99310f3e914ae95478dee1db416bd304b48f
+size 6626
runs/Jan21_06-08-52_0d30c48e12f2/events.out.tfevents.1705817333.0d30c48e12f2.3053.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:317702624a5fab0db53dca0ff1ede1d9462f4afab660731b2db7802cfe2e8f2d
+size 7055
runs/Jan21_06-25-07_0d30c48e12f2/events.out.tfevents.1705818308.0d30c48e12f2.7236.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:429f376fb2ddc21d40d594107b64e6eada28d874a7d728b78f26fe0fe0386afc
+size 11262
runs/Jan21_06-25-07_0d30c48e12f2/events.out.tfevents.1705820410.0d30c48e12f2.7236.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:debfd472203bfb8f0c1a1f60de2295772ba82baae93ecd5cac576b56c78a3059
+size 359
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c9fa27f09d1d32a838b01887fc9b333e4d7c696958c7590e289ee01efdc7b29
+oid sha256:51e5e009e728175c03f3bd3ef428a6117521748fce818f4193725a2b7582feb2
 size 4600
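
For reference, a hedged usage sketch: the repo id is inferred from the committer and model name, and the prompt is invented. The generation defaults baked into config.json (`do_sample: true`, `max_length: 50`) apply automatically:

```python
from transformers import pipeline

# Repo id inferred (committer + model name); prompt is illustrative.
generator = pipeline("text-generation", model="progs2002/star-trek-tng-script-generator")
print(generator("PICARD:")[0]["generated_text"])
```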