Upload folder using huggingface_hub
- checkpoint-5388/config.json +42 -0
- checkpoint-5388/optimizer.pt +3 -0
- checkpoint-5388/preprocessor_config.json +22 -0
- checkpoint-5388/pytorch_model.bin +3 -0
- checkpoint-5388/rng_state.pth +3 -0
- checkpoint-5388/scheduler.pt +3 -0
- checkpoint-5388/trainer_state.json +133 -0
- checkpoint-5388/training_args.bin +3 -0
- config.json +42 -0
- preprocessor_config.json +22 -0
- pytorch_model.bin +3 -0
- training_args.bin +3 -0
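
This commit pushes a local Trainer output folder to the Hub in a single call. A minimal sketch of how such an upload is typically produced with huggingface_hub (the repo ID and local folder path below are placeholders, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_folder(
        folder_path="facial_emotions_image_detection",  # local Trainer output dir (assumed)
        repo_id="your-username/facial_emotions_image_detection",  # placeholder repo ID
        commit_message="Upload folder using huggingface_hub",
    )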
checkpoint-5388/config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "sad",
+    "1": "neutral",
+    "2": "happy",
+    "3": "fear",
+    "4": "surprise",
+    "5": "disgust",
+    "6": "angry"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "angry": 6,
+    "disgust": 5,
+    "fear": 3,
+    "happy": 2,
+    "neutral": 1,
+    "sad": 0,
+    "surprise": 4
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.33.3"
+}
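
The config describes google/vit-base-patch16-224-in21k fine-tuned for single-label classification over seven emotion classes, with id2label/label2id mapping head indices to names. A hedged sketch of loading the checkpoint and reading off a prediction (the repo ID is a placeholder):

    import torch
    from transformers import ViTForImageClassification

    model = ViTForImageClassification.from_pretrained(
        "your-username/facial_emotions_image_detection"  # placeholder repo ID
    )
    pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for a preprocessed face image
    with torch.no_grad():
        logits = model(pixel_values=pixel_values).logits  # shape (1, 7)
    print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "happy"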
checkpoint-5388/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8bfb2e94b5dd49155bdce9f5c750e407231f5f7a6e0e0b2bf4c3e47d885b738
+size 686599173
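
Binary artifacts such as optimizer.pt are committed as Git LFS pointers: a spec version line, the sha256 oid of the real blob, and its size in bytes (here ~687 MB of optimizer state). The actual file is fetched on demand; a sketch using huggingface_hub (placeholder repo ID):

    from huggingface_hub import hf_hub_download

    local_path = hf_hub_download(
        repo_id="your-username/facial_emotions_image_detection",  # placeholder repo ID
        filename="checkpoint-5388/optimizer.pt",
    )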
checkpoint-5388/preprocessor_config.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
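
These settings match the stock ViT pipeline: resize to 224×224 with bilinear resampling (resample 2 in PIL's encoding), rescale pixel values by 1/255 (0.00392...), then normalize each channel with mean and std 0.5. A sketch of applying them via the saved processor (repo ID and image path are placeholders):

    from PIL import Image
    from transformers import ViTImageProcessor

    processor = ViTImageProcessor.from_pretrained(
        "your-username/facial_emotions_image_detection"  # placeholder repo ID
    )
    image = Image.open("face.jpg")  # hypothetical input image
    inputs = processor(images=image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])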
checkpoint-5388/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59063a82126357566ef6fab2633ea83adef3aa9b687a637f45f6ea14389c2d20
+size 343284077
checkpoint-5388/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da90443e9888cfb17d69b3d263d207803e56b1fe5c3ed489072ae522e44cb75f
+size 14575
checkpoint-5388/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d9c8f81aea360cf8d0299b836ef9e26ddbbea56251ba5d8a1d340459056ad4b
+size 627
checkpoint-5388/trainer_state.json
ADDED
@@ -0,0 +1,133 @@
+{
+  "best_metric": 0.946086049079895,
+  "best_model_checkpoint": "facial_emotions_image_detection/checkpoint-5388",
+  "epoch": 6.0,
+  "eval_steps": 500,
+  "global_step": 5388,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.56,
+      "learning_rate": 9.15698763581866e-06,
+      "loss": 1.4623,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5976595151852884,
+      "eval_loss": 1.1609539985656738,
+      "eval_runtime": 62.7699,
+      "eval_samples_per_second": 114.354,
+      "eval_steps_per_second": 14.306,
+      "step": 898
+    },
+    {
+      "epoch": 1.11,
+      "learning_rate": 8.220307231172726e-06,
+      "loss": 1.1485,
+      "step": 1000
+    },
+    {
+      "epoch": 1.67,
+      "learning_rate": 7.28362682652679e-06,
+      "loss": 1.0446,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6316522708275285,
+      "eval_loss": 1.0506905317306519,
+      "eval_runtime": 61.7382,
+      "eval_samples_per_second": 116.265,
+      "eval_steps_per_second": 14.545,
+      "step": 1796
+    },
+    {
+      "epoch": 2.23,
+      "learning_rate": 6.346946421880855e-06,
+      "loss": 0.9763,
+      "step": 2000
+    },
+    {
+      "epoch": 2.78,
+      "learning_rate": 5.41026601723492e-06,
+      "loss": 0.9268,
+      "step": 2500
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.6483700195040402,
+      "eval_loss": 0.9974768161773682,
+      "eval_runtime": 61.9455,
+      "eval_samples_per_second": 115.876,
+      "eval_steps_per_second": 14.497,
+      "step": 2694
+    },
+    {
+      "epoch": 3.34,
+      "learning_rate": 4.473585612588985e-06,
+      "loss": 0.9028,
+      "step": 3000
+    },
+    {
+      "epoch": 3.9,
+      "learning_rate": 3.53690520794305e-06,
+      "loss": 0.8685,
+      "step": 3500
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.6560323209807746,
+      "eval_loss": 0.9746624231338501,
+      "eval_runtime": 62.2276,
+      "eval_samples_per_second": 115.351,
+      "eval_steps_per_second": 14.431,
+      "step": 3592
+    },
+    {
+      "epoch": 4.45,
+      "learning_rate": 2.600224803297115e-06,
+      "loss": 0.8314,
+      "step": 4000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.6642518807467261,
+      "eval_loss": 0.9527061581611633,
+      "eval_runtime": 62.924,
+      "eval_samples_per_second": 114.074,
+      "eval_steps_per_second": 14.271,
+      "step": 4490
+    },
+    {
+      "epoch": 5.01,
+      "learning_rate": 1.6635443986511805e-06,
+      "loss": 0.8228,
+      "step": 4500
+    },
+    {
+      "epoch": 5.57,
+      "learning_rate": 7.268639940052455e-07,
+      "loss": 0.802,
+      "step": 5000
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.6668988576205072,
+      "eval_loss": 0.946086049079895,
+      "eval_runtime": 62.799,
+      "eval_samples_per_second": 114.301,
+      "eval_steps_per_second": 14.3,
+      "step": 5388
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 5388,
+  "num_train_epochs": 6,
+  "save_steps": 500,
+  "total_flos": 1.3348903362258014e+19,
+  "trial_name": null,
+  "trial_params": null
+}
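
The log shows eval accuracy climbing from 0.598 at epoch 1 to 0.667 at epoch 6, where the eval loss of 0.946086 (the best_metric) makes checkpoint-5388 the best model so far. Training can be resumed from this directory with the Trainer API; a sketch assuming a Trainer configured as in the original run (the model, datasets, and TrainingArguments are not part of this commit):

    # `trainer` is assumed to be a transformers.Trainer built with the original
    # model, TrainingArguments, and datasets; none of those are in this commit.
    trainer.train(resume_from_checkpoint="facial_emotions_image_detection/checkpoint-5388")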
checkpoint-5388/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b150c316174e87bb51b60759b2ed8868245101c52a124909a473c8e61332e98d
+size 4027
config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "sad",
+    "1": "neutral",
+    "2": "happy",
+    "3": "fear",
+    "4": "surprise",
+    "5": "disgust",
+    "6": "angry"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "angry": 6,
+    "disgust": 5,
+    "fear": 3,
+    "happy": 2,
+    "neutral": 1,
+    "sad": 0,
+    "surprise": 4
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.33.3"
+}
preprocessor_config.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59063a82126357566ef6fab2633ea83adef3aa9b687a637f45f6ea14389c2d20
+size 343284077
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b150c316174e87bb51b60759b2ed8868245101c52a124909a473c8e61332e98d
+size 4027