psidharth567 committed on
Commit
65bd563
·
verified ·
1 Parent(s): 4135f3b

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -57,3 +57,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ normalization/merged_transliteration_sampled.jsonl filter=lfs diff=lfs merge=lfs -text
61
+ normalization/punctuation_test_merged.jsonl filter=lfs diff=lfs merge=lfs -text
62
+ normalization/sampled_100k_norm.jsonl filter=lfs diff=lfs merge=lfs -text
63
+ normalization/sampled_100k_translit.jsonl filter=lfs diff=lfs merge=lfs -text
64
+ normalization/test_split_translit.jsonl filter=lfs diff=lfs merge=lfs -text
65
+ normalization/train_set_punct_2.02M_ready.jsonl filter=lfs diff=lfs merge=lfs -text
66
+ normalization/train_set_punct_final_no_leak.jsonl filter=lfs diff=lfs merge=lfs -text
67
+ normalization/transliteration_sampled_850k.jsonl filter=lfs diff=lfs merge=lfs -text
68
+ normalization/transliteration_sampled_850k_no_leak.jsonl filter=lfs diff=lfs merge=lfs -text
normalization/merged_transliteration_sampled.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9366505499dee8ed19ff27998ef3fb5ddd502ffa3d60d28bf9fbd054d12be81f
3
+ size 7410537492
normalization/punctuation_test_merged.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11b392c4ebad7a6f47ef2fc88aac92143ecfeec94276d0f44e78f0f44d7ef5cc
3
+ size 214068415
normalization/sample_test_split.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Sample 1000 samples per language from merged_transliteration_sampled.jsonl
4
+ with constraints:
5
+ - Each sample must have < 100 words (counting both input_text and output_text)
6
+ - Samples must not exist in sampled_100k_translit.jsonl (train split)
7
+ """
8
+
9
+ import json
10
+ import random
11
+ from collections import defaultdict
12
+ from typing import Set, Dict, List, Tuple
13
+
14
def count_words(text: str) -> int:
    """Return the number of whitespace-separated words in *text*.

    Falsy input (empty string or None) counts as zero words.
    """
    return len(text.split()) if text else 0
19
+
20
def load_train_split(train_file: str) -> Set[Tuple[str, str]]:
    """Read the train-split JSONL file and return its (input_text, output_text)
    pairs as a set, enabling O(1) duplicate lookups later.

    Lines that are not valid JSON are skipped silently; missing fields
    default to the empty string. Progress is reported every 10k lines.
    """
    seen: Set[Tuple[str, str]] = set()
    print(f"Loading train split from {train_file}...")
    with open(train_file, 'r', encoding='utf-8') as handle:
        for idx, raw in enumerate(handle, 1):
            if idx % 10000 == 0:
                print(f" Processed {idx} lines...")
            try:
                record = json.loads(raw.strip())
            except json.JSONDecodeError:
                continue
            pair = (record.get('input_text', '').strip(),
                    record.get('output_text', '').strip())
            # Tuples are hashable, so the pair can key the dedup set directly.
            seen.add(pair)
    print(f"Loaded {len(seen)} samples from train split")
    return seen
41
+
42
def sample_test_split(
    source_file: str,
    train_samples: Set[Tuple[str, str]],
    samples_per_language: int = 1000,
    max_words: int = 100
) -> Dict[str, List[Dict]]:
    """Collect eligible samples per language from *source_file* and draw up to
    *samples_per_language* of them for each language.

    A record is eligible only when:
      - its language, input_text and output_text fields are all non-empty,
      - input_text plus output_text total fewer than *max_words* words,
      - its (input_text, output_text) pair does not appear in *train_samples*
        (prevents train/test leakage).

    Returns:
        Mapping from language to the list of chosen sample dicts. Languages
        with fewer eligible records than requested keep all of them.
    """
    candidates: Dict[str, List[Dict]] = defaultdict(list)

    print(f"\nReading source file: {source_file}")
    with open(source_file, 'r', encoding='utf-8') as handle:
        for idx, raw in enumerate(handle, 1):
            if idx % 100000 == 0:
                print(f" Processed {idx} lines...")
            try:
                record = json.loads(raw.strip())
            except json.JSONDecodeError:
                continue
            inp = record.get('input_text', '').strip()
            out = record.get('output_text', '').strip()
            lang = record.get('language', '').strip()

            # Guard clauses: drop incomplete, over-length, or leaked records.
            if not (lang and inp and out):
                continue
            if count_words(inp) + count_words(out) >= max_words:
                continue
            if (inp, out) in train_samples:
                continue

            candidates[lang].append(record)

    print(f"\nFound samples by language:")
    for lang, pool in candidates.items():
        print(f" {lang}: {len(pool)} samples")

    chosen: Dict[str, List[Dict]] = {}
    print(f"\nSampling {samples_per_language} samples per language...")
    for lang, pool in candidates.items():
        if len(pool) < samples_per_language:
            print(f" WARNING: {lang} has only {len(pool)} samples, "
                  f"requested {samples_per_language}. Using all available.")
            chosen[lang] = pool
        else:
            chosen[lang] = random.sample(pool, samples_per_language)
        print(f" {lang}: sampled {len(chosen[lang])} samples")

    return chosen
103
+
104
def write_output(sampled_data: Dict[str, List[Dict]], output_file: str):
    """Serialize every sampled record to *output_file* as JSON Lines.

    Languages are emitted in sorted order so output is deterministic;
    ensure_ascii=False keeps non-Latin scripts human-readable.
    """
    print(f"\nWriting output to {output_file}...")
    written = 0
    with open(output_file, 'w', encoding='utf-8') as sink:
        for lang in sorted(sampled_data):
            for record in sampled_data[lang]:
                sink.write(json.dumps(record, ensure_ascii=False) + '\n')
                written += 1
    print(f"Written {written} samples to {output_file}")
114
+
115
def main():
    """Build the transliteration test split end-to-end.

    Loads the train pairs for leak prevention, samples up to 1000 short
    (<100 words) examples per language from the merged source, and writes
    the result as JSONL.
    """
    source_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/merged_transliteration_sampled.jsonl"
    train_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/sampled_100k_translit.jsonl"
    output_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/test_split_translit.jsonl"

    # Fixed seed makes the random.sample draws reproducible across runs.
    random.seed(42)

    train_samples = load_train_split(train_file)

    sampled_data = sample_test_split(
        source_file=source_file,
        train_samples=train_samples,
        samples_per_language=1000,
        max_words=100
    )

    write_output(sampled_data, output_file)
    print("\nDone!")


if __name__ == "__main__":
    main()
141
+
normalization/sampled_100k_norm.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8c9106fe66602bc8e970e8e884e265c32cba53fdc7bd5f092c623284125fa85
3
+ size 195199499
normalization/sampled_100k_translit.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f3f1b401d00531fd55f4f63f67094a8b4ccb3a96a6bf833233096c601aafede
3
+ size 294123706
normalization/test_split_translit.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f673d2ba5012a3110950811cc49c27390576399304922823937295019fd4eb2
3
+ size 10589046
normalization/train_set_punct_2.02M_ready.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6ce43128a1c2f0cfab9adad54a8c95813e171a40c314c9774ecddb8f1283f12
3
+ size 24622765526
normalization/train_set_punct_final_no_leak.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55e2015311cbd174340c6abbc20920b715a95582d39bed47ae80ef34eae0ad38
3
+ size 8318667133
normalization/transliteration_sampled_850k.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a13d77b258bdad3868c30da8190a46684d87db22ebee5da8b4427999662b1ed
3
+ size 1644349336
normalization/transliteration_sampled_850k_no_leak.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f0cdd6f7a0f9f10703b3634ecf5db27c9518eaf5511c127178e16bf6f316c03
3
+ size 1638823710