SaylorTwift (HF Staff) committed
Commit ee28d08 · verified · 1 Parent(s): ed435f8

Add 'systematic_review_inclusion' config data files

.gitattributes CHANGED
@@ -21,3 +21,4 @@ banking_77/test-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
 terms_of_service/test-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
 neurips_impact_statement_risks/test-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
 overruling/test-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
+systematic_review_inclusion/test-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -191,6 +191,34 @@ dataset_info:
     num_examples: 2350
   download_size: 277926
   dataset_size: 439214
+- config_name: systematic_review_inclusion
+  features:
+  - name: Title
+    dtype: string
+  - name: Abstract
+    dtype: string
+  - name: Authors
+    dtype: string
+  - name: Journal
+    dtype: string
+  - name: ID
+    dtype: int32
+  - name: Label
+    dtype:
+      class_label:
+        names:
+          '0': Unlabeled
+          '1': included
+          '2': not included
+  splits:
+  - name: train
+    num_bytes: 52677
+    num_examples: 50
+  - name: test
+    num_bytes: 2344244
+    num_examples: 2243
+  download_size: 1357407
+  dataset_size: 2396921
 - config_name: tai_safety_research
   features:
   - name: Title
@@ -272,6 +300,12 @@ configs:
     path: overruling/train-*
   - split: test
     path: overruling/test-*
+- config_name: systematic_review_inclusion
+  data_files:
+  - split: train
+    path: systematic_review_inclusion/train-*
+  - split: test
+    path: systematic_review_inclusion/test-*
 - config_name: tai_safety_research
   data_files:
   - split: train
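The README change above registers the new config's schema and data files, so after this commit the split can be pulled through the standard `datasets` API. A minimal sketch (not part of the commit) — the repository id `ought/raft` is an assumption, since the diff does not name the repo:

```python
# Hypothetical usage of the new config; "ought/raft" is an assumed repo id.
from datasets import load_dataset

raft_sri = load_dataset("ought/raft", "systematic_review_inclusion")

print(raft_sri["train"].num_rows)                 # 50 per the README metadata
print(raft_sri["test"].num_rows)                  # 2243 per the README metadata
print(raft_sri["train"].features["Label"].names)  # ['Unlabeled', 'included', 'not included']
```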
dataset_infos.json CHANGED
@@ -414,49 +414,38 @@
     "features": {
       "Title": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "Abstract": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "Authors": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "Journal": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "ID": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       },
       "Label": {
-        "num_classes": 3,
         "names": [
           "Unlabeled",
           "included",
           "not included"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
-    "builder_name": "raft",
+    "builder_name": "parquet",
+    "dataset_name": "raft",
     "config_name": "systematic_review_inclusion",
     "version": {
       "version_str": "1.1.0",
-      "description": null,
       "major": 1,
       "minor": 1,
       "patch": 0
@@ -464,111 +453,20 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes": 52693,
+        "num_bytes": 52677,
         "num_examples": 50,
-        "dataset_name": "raft"
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes": 2344260,
+        "num_bytes": 2344244,
         "num_examples": 2243,
-        "dataset_name": "raft"
-      }
-    },
-    "download_checksums": {
-      "data/ade_corpus_v2/train.csv": {
-        "num_bytes": 7788,
-        "checksum": "b6c1268b8ce0fbf4fcaa0ee88562354b65d7ebd2527c3c3cc875e3a47757ae36"
-      },
-      "data/ade_corpus_v2/test_unlabeled.csv": {
-        "num_bytes": 661890,
-        "checksum": "28981e898281755e1c2d460ab75cceb8f9d8ce0a88ff4636153cf8c393c6f99e"
-      },
-      "data/banking_77/train.csv": {
-        "num_bytes": 3914,
-        "checksum": "8c417cea66d12569cb3057b125a98358aea0642dda67cf6deea52d4ec1c28ae1"
-      },
-      "data/banking_77/test_unlabeled.csv": {
-        "num_bytes": 327152,
-        "checksum": "aebd62e56a3b8ebf618afa519cc261fedc7700d96053357ebff83fab44bbaeac"
-      },
-      "data/terms_of_service/train.csv": {
-        "num_bytes": 11528,
-        "checksum": "8b46784b1e601d753ad44ce33fced828ec3bea95b6b575a2195322ead807f6c7"
-      },
-      "data/terms_of_service/test_unlabeled.csv": {
-        "num_bytes": 917272,
-        "checksum": "491679e8c9303f825d341dcdfb0a5779a9d2f925744cc44c0039b219abf7d83f"
-      },
-      "data/tai_safety_research/train.csv": {
-        "num_bytes": 54758,
-        "checksum": "fac15dde8a9f5536424289aa707dcfe1fa3128a7ee2d813cf3251c53092f7dbc"
-      },
-      "data/tai_safety_research/test_unlabeled.csv": {
-        "num_bytes": 1594375,
-        "checksum": "e606d7858813490507d90d3d17830c6f1ebd8c777fc4a9f865699e375be355d0"
-      },
-      "data/neurips_impact_statement_risks/train.csv": {
-        "num_bytes": 70008,
-        "checksum": "76cd6aa2707cd99445ba69f8c392a89faa4bf718e967b07dc457f88529b3ff2e"
-      },
-      "data/neurips_impact_statement_risks/test_unlabeled.csv": {
-        "num_bytes": 196429,
-        "checksum": "5e4d1f313f92dad3e06e035eaff044f818b20ce1ed4d22fcd17e7756e54089a7"
-      },
-      "data/overruling/train.csv": {
-        "num_bytes": 7585,
-        "checksum": "617061ba0dbd501d74896ccf2f9ebed00c35c4c4057f74afdd94c96cb514130e"
-      },
-      "data/overruling/test_unlabeled.csv": {
-        "num_bytes": 412483,
-        "checksum": "c0a61fc8980b544611f8d662c752d88e2ed635146f2f3c72f16eec60e3ddb7fe"
-      },
-      "data/systematic_review_inclusion/train.csv": {
-        "num_bytes": 52491,
-        "checksum": "2d2acd76f6acf1fd1359cf07323586fa134bbe76689e112f5c213b63271e2318"
-      },
-      "data/systematic_review_inclusion/test_unlabeled.csv": {
-        "num_bytes": 2309265,
-        "checksum": "20710c15f7fda010a3a86a0eda52c5ceb6a709b626610d19e1fc353c42c2d955"
-      },
-      "data/one_stop_english/train.csv": {
-        "num_bytes": 201489,
-        "checksum": "8c0102ea703677c23c3525e3bc7504c4b734d9298c5d3a25403d274034cb9d89"
-      },
-      "data/one_stop_english/test_unlabeled.csv": {
-        "num_bytes": 2085747,
-        "checksum": "31e299f91925647744e998f221c0f66c2719df297a20d36d993c8892616a4a53"
-      },
-      "data/tweet_eval_hate/train.csv": {
-        "num_bytes": 7642,
-        "checksum": "ab091c90e8afbb0dabe17cc6be93b1b80fd914e00a12d66b2dd7b194f355ba65"
-      },
-      "data/tweet_eval_hate/test_unlabeled.csv": {
-        "num_bytes": 412052,
-        "checksum": "707e720c60f8beb359301994c691a609367cf3bf27d0c573d22f9e47ecc63204"
-      },
-      "data/twitter_complaints/train.csv": {
-        "num_bytes": 5376,
-        "checksum": "365b067c2d2a7f30128d766baf4901936e047295bb2a693878164dbb84d828fa"
-      },
-      "data/twitter_complaints/test_unlabeled.csv": {
-        "num_bytes": 336272,
-        "checksum": "7f6f975ec98a5b467b39c487bbc711b29e177119d12c49c79b31faba83609082"
-      },
-      "data/semiconductor_org_types/train.csv": {
-        "num_bytes": 8120,
-        "checksum": "b7e238cbfd0ed518c222b64e8656599ed13872de135c6e38d6f6123c355d2f64"
-      },
-      "data/semiconductor_org_types/test_unlabeled.csv": {
-        "num_bytes": 68529,
-        "checksum": "141d8b36685475310c98cdb16099acbf04498d80349f4434c2201deffd271f24"
+        "dataset_name": null
       }
     },
-    "download_size": 9752165,
-    "post_processing_size": null,
-    "dataset_size": 2396953,
-    "size_in_bytes": 12149118
+    "download_size": 1357407,
+    "dataset_size": 2396921,
+    "size_in_bytes": 3754328
   },
   "one_stop_english": {
     "description": "Large pre-trained language models have shown promise for few-shot learning, completing text-based tasks given only a few task-specific examples. Will models soon solve classification tasks that have so far been reserved for human research assistants? \n\n[RAFT](https://raft.elicit.org) is a few-shot classification benchmark that tests language models:\n\n- across multiple domains (lit review, tweets, customer interaction, etc.)\n- on economically valuable classification tasks (someone inherently cares about the task)\n- in a setting that mirrors deployment (50 examples per task, info retrieval allowed, hidden test set)\n",
systematic_review_inclusion/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21b2002765f4fb4db9485fa611c2d41a2f5ffe3695a765165d2f1652ed783d80
+size 1319927
systematic_review_inclusion/train-00000-of-00001.parquet ADDED
Binary file (37.5 kB).
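The test split is uploaded as a single Parquet shard tracked by Git LFS, so the diff above shows only the LFS pointer (oid and size), not the data itself. A hedged sketch of inspecting the shard directly — the repo id is an assumption and the column layout is inferred from the metadata above:

```python
# Download the shard and read it with pyarrow; "ought/raft" is an assumed repo id.
from huggingface_hub import hf_hub_download
import pyarrow.parquet as pq

path = hf_hub_download(
    repo_id="ought/raft",
    filename="systematic_review_inclusion/test-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)

print(table.num_rows)  # expected 2243 per the split metadata
print(table.schema)    # Title/Abstract/Authors/Journal strings, ID int32, Label class index
```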