amsa02 committed
Commit 1f4dedc · verified · 1 Parent(s): ef94b7e

Upload 2 files

Files changed (2)
  1. copious.py +100 -0
  2. run_test.py +45 -0
copious.py ADDED
@@ -0,0 +1,100 @@
+ """
+ Convert Brat format annotations to JSONL format for NER training.
+
+ Author: Amir Safari
+ Date: 17.10.2025
+
+ This script processes Brat annotation files (.ann and .txt) from the train/dev/test
+ directories and converts them into JSONL format suitable for NER model training.
+ """
+ import json
+ import re
+ from pathlib import Path
+
+ print("Starting data conversion from Brat format to JSON Lines...")
+
+ # All NER tags used in the dataset, in the BIO scheme
+ NER_TAGS = [
+     "O", "B-Taxon", "I-Taxon", "B-Geographical_Location", "I-Geographical_Location",
+     "B-Habitat", "I-Habitat", "B-Temporal_Expression", "I-Temporal_Expression",
+     "B-Person", "I-Person",
+ ]
+
+ # Create a mapping from tag name to integer ID
+ tag2id = {tag: i for i, tag in enumerate(NER_TAGS)}
+
+ # Process each split directory (train, dev, test)
+ for split in ["train", "dev", "test"]:
+     print(f"\nProcessing '{split}' split...")
+     input_dir = Path(split)
+     output_file = f"{split}.jsonl"
+
+     if not input_dir.exists():
+         print(f"Directory not found: {input_dir}. Skipping split.")
+         continue
+
+     with open(output_file, "w", encoding="utf-8") as outfile:
+         # Find all .ann files and process them with their corresponding .txt files
+         ann_files = sorted(input_dir.glob("*.ann"))
+         for ann_file in ann_files:
+             txt_file = ann_file.with_suffix(".txt")
+             if not txt_file.exists():
+                 continue
+
+             with open(txt_file, "r", encoding="utf-8") as f:
+                 text = f.read()
+
+             # Tokenize the text by finding all non-whitespace sequences,
+             # recording each token's character span for the overlap check below
+             tokens_with_spans = [
+                 {"text": m.group(0), "start": m.start(), "end": m.end()}
+                 for m in re.finditer(r"\S+", text)
+             ]
+             if not tokens_with_spans:
+                 continue
+
+             tokens = [t["text"] for t in tokens_with_spans]
+             ner_tags = ["O"] * len(tokens)
+
+             # Parse the .ann file to extract entity annotations
+             with open(ann_file, "r", encoding="utf-8") as f:
+                 annotations = []
+                 for line in f:
+                     # Only text-bound annotations ("T" lines) carry entity spans
+                     if not line.startswith("T"):
+                         continue
+                     parts = line.strip().split("\t")
+                     if len(parts) < 2:
+                         continue
+                     tag_parts = parts[1].split(" ")
+                     label = tag_parts[0]
+                     spans_str = " ".join(tag_parts[1:])
+                     char_spans = []
+
+                     # Discontinuous spans are separated by semicolons in Brat
+                     for span_part in spans_str.split(";"):
+                         try:
+                             start, end = map(int, span_part.split())
+                             char_spans.append((start, end))
+                         except ValueError:
+                             continue
+                     if char_spans:
+                         annotations.append({"label": label, "spans": char_spans})
+
+             # Apply the BIO tagging scheme to tokens based on character span overlaps
+             for ann in annotations:
+                 is_first_token = True
+                 for start_char, end_char in ann["spans"]:
+                     for i, token in enumerate(tokens_with_spans):
+                         if token["start"] < end_char and token["end"] > start_char:
+                             ner_tags[i] = f"B-{ann['label']}" if is_first_token else f"I-{ann['label']}"
+                             is_first_token = False
+
+             # Convert tag strings to integer IDs for model compatibility
+             ner_tag_ids = [tag2id.get(tag, tag2id["O"]) for tag in ner_tags]
+
+             # Write the processed example as a single JSON line
+             json_line = json.dumps({
+                 "id": txt_file.stem,
+                 "tokens": tokens,
+                 "ner_tags": ner_tag_ids,
+             })
+             outfile.write(json_line + "\n")
+
+     print(f"Successfully created {output_file}")
+
+ print("\nConversion complete! ✨")
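
For reference, a minimal sketch of the round trip copious.py performs, using a hypothetical document (the file name, text, and annotations below are illustrative, not taken from the dataset). Given train/doc01.txt containing "Aedes aegypti was collected in Kenya" and train/doc01.ann containing the tab-separated Brat standoff lines

    T1	Taxon 0 13	Aedes aegypti
    T2	Geographical_Location 31 36	Kenya

the script would append one JSON line to train.jsonl:

    {"id": "doc01", "tokens": ["Aedes", "aegypti", "was", "collected", "in", "Kenya"], "ner_tags": [1, 2, 0, 0, 0, 3]}

where 1 and 2 are B-Taxon and I-Taxon, and 3 is B-Geographical_Location, under the tag2id mapping above.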
run_test.py ADDED
@@ -0,0 +1,45 @@
+ """
+ Test script to load and verify the converted JSONL dataset.
+
+ Author: Amir Safari
+ Date: 17.10.2025
+
+ This script loads the converted JSONL files (train.jsonl, dev.jsonl, test.jsonl)
+ and verifies they can be properly loaded with the HuggingFace datasets library.
+ """
+ from datasets import load_dataset, Features, Value, Sequence, ClassLabel
+
+ print("Attempting to load the converted JSONL dataset...")
+
+ # All NER tags for the dataset; must match the list used in copious.py
+ NER_TAGS = [
+     "O", "B-Taxon", "I-Taxon", "B-Geographical_Location", "I-Geographical_Location",
+     "B-Habitat", "I-Habitat", "B-Temporal_Expression", "I-Temporal_Expression",
+     "B-Person", "I-Person",
+ ]
+
+ # Define the features explicitly so ner_tags are decoded as ClassLabels
+ # rather than left as bare integers
+ features = Features({
+     'id': Value('string'),
+     'tokens': Sequence(Value('string')),
+     'ner_tags': Sequence(ClassLabel(names=NER_TAGS)),
+ })
+
+ try:
+     # Load the JSONL files directly, using the manually defined features
+     dataset = load_dataset("json", data_files={
+         "train": "train.jsonl",
+         "validation": "dev.jsonl",
+         "test": "test.jsonl",
+     }, features=features)
+
+     print("\n✅ Success! The dataset was loaded correctly.")
+     print("Here is the loaded dataset info:")
+     print(dataset)
+
+     print("\nHere is the first training example:")
+     print(dataset["train"][0])
+
+ except Exception as e:
+     print(f"\n❌ An error occurred: {e}")
+     print("Please make sure the 'train.jsonl', 'dev.jsonl', and 'test.jsonl' files exist.")
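
Assuming the train/dev/test directories with the Brat files sit next to the scripts, the intended workflow appears to be to run the converter and then the check:

    python copious.py
    python run_test.py

Passing the explicit Features to load_dataset attaches the label names to the integer tags, so, for example, dataset["train"].features["ner_tags"].feature.int2str(1) recovers "B-Taxon".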