Addyk24 committed on
Commit 2d2b19d · verified · 1 Parent(s): 429a0db

Upload fine-tuned hazard model

.gitattributes CHANGED
@@ -1,35 +1,11 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoint-120/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+checkpoint-120/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+checkpoint-120/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+checkpoint-180/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+checkpoint-180/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+checkpoint-180/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+checkpoint-60/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+checkpoint-60/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+checkpoint-60/scheduler.pt filter=lfs diff=lfs merge=lfs -text
 
checkpoint-120/config.json ADDED
@@ -0,0 +1,52 @@
+{
+  "architectures": [
+    "XLMRobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "tsunami",
+    "1": "storm_surge",
+    "2": "high_waves",
+    "3": "coastal_flooding",
+    "4": "swell_surge",
+    "5": "rip_current",
+    "6": "other",
+    "7": "no_hazard",
+    "8": "flood",
+    "9": "abnormal_tide"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "abnormal_tide": 9,
+    "coastal_flooding": 3,
+    "flood": 8,
+    "high_waves": 2,
+    "no_hazard": 7,
+    "other": 6,
+    "rip_current": 5,
+    "storm_surge": 1,
+    "swell_surge": 4,
+    "tsunami": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.51.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
checkpoint-120/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b8f5c74f542181778732c2cd9eae988779125f4bcb25c41e264e3ef5f4a5643
+size 1112229616
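
What git itself stores for each of these large files is just the three-line LFS pointer shown above: spec version, SHA-256 of the blob, and byte size (1112229616 bytes here; at 4 bytes per float32 weight that works out to roughly 278M parameters, in line with an XLM-R-base sequence classifier). A small parser sketch for the pointer format:

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; size is the only numeric field.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    fields["size"] = int(fields["size"])
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:7b8f5c74f542181778732c2cd9eae988779125f4bcb25c41e264e3ef5f4a5643\n"
    "size 1112229616\n"
)

info = parse_lfs_pointer(pointer)
print(info["oid"])
print(f"{info['size'] / 4 / 1e6:.0f}M float32 parameters")  # ~278M
```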
checkpoint-120/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f79e8cb47fbb370cc7d766544c8adb44b35b50682370868667ad1466241f3051
+size 2224573178
checkpoint-120/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40270b0e08c5d776630a44bd81dca97ed397d7c82bc375e71120c4811518a698
+size 13990
checkpoint-120/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa48eb840668bc51688608e36d63233415150e64db1c1a48ae2dddba60bfb28d
+size 1064
checkpoint-120/trainer_state.json ADDED
@@ -0,0 +1,50 @@
+{
+  "best_global_step": 120,
+  "best_metric": 0.739890992641449,
+  "best_model_checkpoint": "../trained_models/hazard_model\\checkpoint-120",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 120,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_loss": 2.024980068206787,
+      "eval_runtime": 29.0401,
+      "eval_samples_per_second": 8.264,
+      "eval_steps_per_second": 0.517,
+      "step": 60
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.739890992641449,
+      "eval_runtime": 28.9542,
+      "eval_samples_per_second": 8.289,
+      "eval_steps_per_second": 0.518,
+      "step": 120
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 180,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 126302378065920.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": null
+}
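
The state file records one evaluation per epoch (eval_steps is 500, above max_steps 180, so evaluation is presumably driven by a per-epoch strategy rather than step count): eval loss falls from 2.0250 at step 60 to 0.7399 at step 120, and `best_model_checkpoint` tracks the lowest value seen so far. A minimal sketch for reading the trajectory back, assuming it runs next to the checkpoint directories:

```python
import json

# Point this at any of the checkpoint-*/ directories in the repo.
with open("checkpoint-120/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"epoch {entry['epoch']:.0f}  step {entry['step']:>3}  "
          f"eval_loss {entry['eval_loss']:.4f}")

print("best:", state["best_metric"], "at step", state["best_global_step"])
```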
checkpoint-120/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c133ab9071417e8435d247240909acb0637b0dfb012e665c137a9bf58fe86670
+size 5304
checkpoint-180/config.json ADDED
@@ -0,0 +1,52 @@
+{
+  "architectures": [
+    "XLMRobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "tsunami",
+    "1": "storm_surge",
+    "2": "high_waves",
+    "3": "coastal_flooding",
+    "4": "swell_surge",
+    "5": "rip_current",
+    "6": "other",
+    "7": "no_hazard",
+    "8": "flood",
+    "9": "abnormal_tide"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "abnormal_tide": 9,
+    "coastal_flooding": 3,
+    "flood": 8,
+    "high_waves": 2,
+    "no_hazard": 7,
+    "other": 6,
+    "rip_current": 5,
+    "storm_surge": 1,
+    "swell_surge": 4,
+    "tsunami": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.51.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
checkpoint-180/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f73b0790fec86b8bd9bed2843a5b146645c729a6bb4411d0572b21a6e7f4e36e
+size 1112229616
checkpoint-180/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8df383470f6409a4dbe698def7a11779d8227209c865b2b089f6230490a931f6
+size 2224573178
checkpoint-180/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b1b0a9452f4e0e77a32263f3627a1d6bc3b923a25e6a0581df865762394aefd
+size 13990
checkpoint-180/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd1ce2006b6b8539e68c6306c26d9705d62485431acc728c859d7f524437aa6
+size 1064
checkpoint-180/trainer_state.json ADDED
@@ -0,0 +1,58 @@
+{
+  "best_global_step": 180,
+  "best_metric": 0.07337220758199692,
+  "best_model_checkpoint": "../trained_models/hazard_model\\checkpoint-180",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 180,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_loss": 2.024980068206787,
+      "eval_runtime": 29.0401,
+      "eval_samples_per_second": 8.264,
+      "eval_steps_per_second": 0.517,
+      "step": 60
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.739890992641449,
+      "eval_runtime": 28.9542,
+      "eval_samples_per_second": 8.289,
+      "eval_steps_per_second": 0.518,
+      "step": 120
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.07337220758199692,
+      "eval_runtime": 45.0725,
+      "eval_samples_per_second": 5.325,
+      "eval_steps_per_second": 0.333,
+      "step": 180
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 180,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 189453567098880.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": null
+}
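
This is the terminal state of the run: `should_training_stop` is true at `max_steps` 180, and eval loss drops 2.0250 → 0.7399 → 0.0734 over the three epochs (60 steps per epoch at a train batch size of 16 suggests on the order of 960 training examples, assuming no gradient accumulation). A sketch for programmatically picking the best of the three checkpoints from these state files, assuming it runs in the repo root:

```python
import json
from pathlib import Path

def final_eval_loss(state_file: Path) -> float:
    # Last eval_loss recorded in this checkpoint's log history.
    state = json.loads(state_file.read_text())
    return [e["eval_loss"] for e in state["log_history"] if "eval_loss" in e][-1]

candidates = sorted(Path(".").glob("checkpoint-*/trainer_state.json"))
best = min(candidates, key=final_eval_loss)
print(best.parent.name, final_eval_loss(best))  # expected: checkpoint-180 0.0734...
```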
checkpoint-180/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c133ab9071417e8435d247240909acb0637b0dfb012e665c137a9bf58fe86670
+size 5304
checkpoint-60/config.json ADDED
@@ -0,0 +1,52 @@
+{
+  "architectures": [
+    "XLMRobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "tsunami",
+    "1": "storm_surge",
+    "2": "high_waves",
+    "3": "coastal_flooding",
+    "4": "swell_surge",
+    "5": "rip_current",
+    "6": "other",
+    "7": "no_hazard",
+    "8": "flood",
+    "9": "abnormal_tide"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "abnormal_tide": 9,
+    "coastal_flooding": 3,
+    "flood": 8,
+    "high_waves": 2,
+    "no_hazard": 7,
+    "other": 6,
+    "rip_current": 5,
+    "storm_surge": 1,
+    "swell_surge": 4,
+    "tsunami": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.51.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
checkpoint-60/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:affcbe728aa1f782820be2cfde50bac874c30abf9423b5b100222c079a4c3627
+size 1112229616
checkpoint-60/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1f889a2ccb7ce22cbf94ac846ac73d8722d138c6060511f40b2efd37eeac0cb
+size 2224573178
checkpoint-60/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:834309684b7e1a13f7cbbd21856123e81d2c6663ef0e38c45dfd61e53a76bc9f
+size 13990
checkpoint-60/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4412989c9a4572030898bd2b46a1c3c3b9a02cfb524aa85a5429582c6b507f5e
+size 1064
checkpoint-60/trainer_state.json ADDED
@@ -0,0 +1,42 @@
+{
+  "best_global_step": 60,
+  "best_metric": 2.024980068206787,
+  "best_model_checkpoint": "../trained_models/hazard_model\\checkpoint-60",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 60,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_loss": 2.024980068206787,
+      "eval_runtime": 29.0401,
+      "eval_samples_per_second": 8.264,
+      "eval_steps_per_second": 0.517,
+      "step": 60
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 180,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 63151189032960.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-60/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c133ab9071417e8435d247240909acb0637b0dfb012e665c137a9bf58fe86670
+size 5304
config.json ADDED
@@ -0,0 +1,52 @@
+{
+  "architectures": [
+    "XLMRobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "tsunami",
+    "1": "storm_surge",
+    "2": "high_waves",
+    "3": "coastal_flooding",
+    "4": "swell_surge",
+    "5": "rip_current",
+    "6": "other",
+    "7": "no_hazard",
+    "8": "flood",
+    "9": "abnormal_tide"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "abnormal_tide": 9,
+    "coastal_flooding": 3,
+    "flood": 8,
+    "high_waves": 2,
+    "no_hazard": 7,
+    "other": 6,
+    "rip_current": 5,
+    "storm_surge": 1,
+    "swell_surge": 4,
+    "tsunami": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.51.3",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f73b0790fec86b8bd9bed2843a5b146645c729a6bb4411d0572b21a6e7f4e36e
+size 1112229616
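
The root `model.safetensors` carries the same SHA-256 as `checkpoint-180/model.safetensors`, so the exported model is the epoch-3 (best) weights. A minimal inference sketch; the local path "." assumes the repo root, and since this commit ships no tokenizer files, falling back to the stock `xlm-roberta-base` tokenizer is an assumption (the fine-tune does not alter the vocabulary, per `vocab_size` 250002):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Tokenizer fallback is an assumption: no tokenizer files are in this commit.
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForSequenceClassification.from_pretrained(".")
model.eval()

text = "Huge waves are flooding the coastal road near the harbour"
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# id2label from config.json maps the argmax to one of the 10 hazard classes.
print(model.config.id2label[logits.argmax(dim=-1).item()])
```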
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c133ab9071417e8435d247240909acb0637b0dfb012e665c137a9bf58fe86670
+size 5304
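
`training_args.bin` is identical (same SHA-256) in all three checkpoints and at the root: one `TrainingArguments` object, pickled with `torch.save`, for the whole run. A sketch for inspecting it; `weights_only=False` is required on recent PyTorch because this is an arbitrary pickled object, so only load files you trust:

```python
import torch

# TrainingArguments is a pickled dataclass, not a tensor file.
args = torch.load("training_args.bin", weights_only=False)

# Fields expected from trainer_state.json: 3 epochs, batch size 16,
# eval/save cadence of 500 steps.
print(args.num_train_epochs, args.per_device_train_batch_size)
print(args.eval_steps, args.save_steps, args.output_dir)
```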