import datasets
import pandas as pd
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {selfie_and_video},
author = {TrainingDataPro},
year = {2023}
}
"""
_DESCRIPTION = """\
The dataset contains 4,000 people. Each person took a selfie on a webcam and
a selfie on a mobile phone. In addition, each person recorded a video on the
phone and on the webcam in which they pronounce a given set of numbers.
The dataset is organized into one folder per person; each folder includes
8 files (4 images and 4 videos).
"""
_NAME = 'selfie_and_video'
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
_LICENSE = ""
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
class SelfieAndVideo(datasets.GeneratorBasedBuilder):
"""Small sample of image-text pairs"""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
'photo_1': datasets.Image(),
'photo_2': datasets.Image(),
'video_3': datasets.Value('string'),
'video_4': datasets.Value('string'),
'photo_5': datasets.Image(),
'photo_6': datasets.Image(),
'video_7': datasets.Value('string'),
'video_8': datasets.Value('string'),
'set_id': datasets.Value('string'),
'worker_id': datasets.Value('string'),
'age': datasets.Value('int8'),
'country': datasets.Value('string'),
'gender': datasets.Value('string')
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        # Download the archive with the photos/videos and the annotation CSV,
        # then iterate over the archive members lazily.
        images = dl_manager.download(f"{_DATA}data.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        'annotations': annotations
                                    }),
        ]
    def _generate_examples(self, images, annotations):
        # The annotation CSV is semicolon-separated.
        annotations_df = pd.read_csv(annotations, sep=';')
        # Collect the raw bytes of every photo in the archive, keyed by its path,
        # so they can be joined with the annotation rows on the 'Link' column.
        images_data = pd.DataFrame(columns=['Link', 'Bytes'])
        for idx, (image_path, image) in enumerate(images):
            if image_path.lower().endswith('.jpg'):
                images_data.loc[idx] = {
                    'Link': image_path,
                    'Bytes': image.read()
                }
annotations_df = pd.merge(annotations_df,
images_data,
on=['Link'],
how='left')
        # Group the annotation rows by person and emit one example per person.
        for idx, worker_id in enumerate(pd.unique(annotations_df['WorkerId'])):
            annotation = annotations_df.loc[annotations_df['WorkerId'] ==
                                            worker_id]
            annotation = annotation.sort_values(['Link'])
            # The script assumes the character at position 37 of the Link is the
            # file index (1-8) used in the feature name; photos are yielded as
            # path/bytes pairs, videos as the raw Link string.
            data = {
                (f'photo_{row.Link[37]}' if row.Link.lower().endswith('.jpg')
                 else f'video_{row.Link[37]}'): ({
                     'path': row.Link,
                     'bytes': row.Bytes
                 } if row.Link.lower().endswith('.jpg') else row.Link)
                for row in annotation.itertuples()
            }
            # Person-level metadata is taken from the row whose Link ends with '1.jpg'.
            meta = annotation.loc[annotation['Link'].str.lower().str.endswith(
                '1.jpg')]
            age = meta['Age'].values[0]
            country = meta['Country'].values[0]
            gender = meta['Gender'].values[0]
            set_id = meta['SetId'].values[0]
data['worker_id'] = worker_id
data['age'] = age
data['country'] = country
data['gender'] = gender
data['set_id'] = set_id
yield idx, data
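

# A minimal usage sketch (assumes the dataset is published at _HOMEPAGE and that
# any access terms on the Hugging Face Hub have been accepted):
if __name__ == '__main__':
    from datasets import load_dataset

    dataset = load_dataset(f"TrainingDataPro/{_NAME}", split='train')
    sample = dataset[0]
    print(sample['worker_id'], sample['age'], sample['country'], sample['gender'])
    photo = sample['photo_1']       # decoded by datasets.Image() into a PIL image
    video_link = sample['video_3']  # kept as the raw Link string from the CSV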