import os
import fsspec
import datasets
import random

# BibTeX entry for the WACV 2024 paper that introduced this dataset (VideoFACT).
_CITATION = """\
@InProceedings{Nguyen_2024_WACV,
    author    = {Nguyen, Tai D. and Fang, Shengbang and Stamm, Matthew C.},
    title     = {VideoFACT: Detecting Video Forgeries Using Attention, Scene Context, and Forensic Traces},
    booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
    month     = {January},
    year      = {2024},
    pages     = {8563-8573}
}
"""

# Human-readable summary shown on the dataset hub page.
# NOTE(review): typos fixed relative to the original text
# ("bluring" -> "blurring", "splited" -> "split", "consists" -> "consist").
_DESCRIPTION = """\
This dataset is a collection of simple and traditional localized video manipulations, such as: splicing,
color correction, contrast enhancement, blurring, and noise addition. The dataset is designed to be used
for training and evaluating video manipulation detection models. We used this dataset to train the
VideoFACT model, which is a deep learning model that uses attention, scene context, and forensic traces
to detect a wide variety of video forgery types, i.e. splicing, editing, deepfake, inpainting.
The dataset is divided into three parts: Video Camera Model Splicing (VCMS), Video Perceptually Visible
Manipulation (VPVM), and Video Perceptually Invisible Manipulation (VPIM). Each part has a total of 4000
videos, each video is 1 second, or 30 frames, has a resolution of 1920 x 1080, and encoded using FFmpeg
with the H.264 codec at CRF 23. Additionally, each part is split into training, validation, and testing
sets that consist of 3200, 200, 600 videos, respectively. More details about the dataset can be found in
the paper.
"""

# Project page for the paper / dataset.
_HOMEPAGE = "https://github.com/ductai199x/videofact-wacv-2024"

# License notice surfaced through `datasets.DatasetInfo`.
_LICENSE = (
    "Licensed under a Creative Commons Attribution-NonCommercial 4.0 International for "
    "Non-commercial use only. Any commercial use should get formal permission first."
)
# One downloadable zip archive per dataset part (config name -> URL).
_URLS = {
    "vcms": "https://huggingface.co/datasets/ductai199x/video_std_manip/resolve/main/vcms.zip",
    "vpvm": "https://huggingface.co/datasets/ductai199x/video_std_manip/resolve/main/vpvm.zip",
    "vpim": "https://huggingface.co/datasets/ductai199x/video_std_manip/resolve/main/vpim.zip",
}


def fsspec_open_file(path, mode):
    """Open ``path`` via fsspec and return the underlying file object.

    PEP 8 (E731): a ``def`` is preferred over binding a lambda to a name.
    NOTE(review): this helper is not referenced anywhere in this file —
    kept for backward compatibility in case external code imports it.
    """
    return fsspec.open(path, mode).open()


class VideoStdManip(datasets.GeneratorBasedBuilder):
    """This dataset is a collection of simple and traditional localized video manipulations, such as:
    splicing, color correction, contrast enhancement, bluring, and noise addition. The dataset is divided
    into three parts: Video Camera Model Splicing (VCMS), Video Perceptually Visible Manipulation (VPVM),
    and Video Perceptually Invisible Manipulation (VPIM)."""

    VERSION = datasets.Version("1.0.0")

    # One configuration per dataset part; select with e.g.
    # datasets.load_dataset("ductai199x/video_std_manip", "vcms").
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="vcms", version=VERSION, description="This is the VCMS part of the dataset"),
        datasets.BuilderConfig(name="vpvm", version=VERSION, description="This is the VPVM part of the dataset"),
        datasets.BuilderConfig(name="vpim", version=VERSION, description="This is the VPIM part of the dataset"),
    ]

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing this builder.

        Examples carry file paths (not decoded frames) plus a binary label:
        0 = original/real video, 1 = manipulated video.
        """
        features = datasets.Features(
            {
                "vid_path": datasets.Value("string"),   # path to the (possibly manipulated) video
                "mask_path": datasets.Value("string"),  # path to the manipulation mask; "" for real videos
                "label": datasets.ClassLabel(num_classes=2),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive for the selected config and declare the three splits.

        Each split is driven by an id-list text file inside the extracted
        archive; `_generate_examples` receives the path to that file.
        """
        part_name = self.config.name
        data_dir = dl_manager.download_and_extract(_URLS[part_name])
        split_ids_files = [
            (datasets.Split.TRAIN, "train_ids.txt"),
            (datasets.Split.VALIDATION, "val_ids.txt"),
            (datasets.Split.TEST, "test_ids.txt"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"filepath": os.path.join(data_dir, part_name, ids_file)},
            )
            for split, ids_file in split_ids_files
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs for the split whose id file is ``filepath``.

        Every video id contributes two examples: the manipulated version
        (label 1, with a mask video) and the original version (label 0, empty
        mask path). The combined list is shuffled deterministically so real
        and fake examples are interleaved the same way on every run.
        """
        with open(filepath, "r") as f:
            vid_ids = f.read().splitlines()
        labeled_ids = [("manip", vid_id) for vid_id in vid_ids] + [("real", vid_id) for vid_id in vid_ids]
        # A private Random instance avoids reseeding the process-global RNG
        # (random.seed) while producing the exact same shuffle order.
        random.Random(2024).shuffle(labeled_ids)
        part_dir = os.path.dirname(filepath)
        for key, (label_str, vid_id) in enumerate(labeled_ids):
            label = 0 if label_str == "real" else 1
            if label == 1:
                vid_path = os.path.join(part_dir, "manipulated", vid_id + ".mp4")
                mask_path = os.path.join(part_dir, "mask", vid_id + ".mp4")
            else:
                vid_path = os.path.join(part_dir, "original", vid_id + ".mp4")
                mask_path = ""
            yield key, {
                "vid_path": vid_path,
                "mask_path": mask_path,
                "label": label,
            }