jmhb committed on
Commit 656f976 · verified · 1 Parent(s): cbd922c

Create load_viddiff_dataset.py

Files changed (1)
  1. load_viddiff_dataset.py +368 -0
load_viddiff_dataset.py ADDED
@@ -0,0 +1,368 @@
import ipdb
import pdb
import os
import numpy as np
import json
import re
from PIL import Image
from pathlib import Path
from datasets import load_dataset
import decord
from tqdm import tqdm
import logging
import hashlib


def load_viddiff_dataset(splits=["easy"], subset_mode="0", cache_dir=None, test_new=False):
    """
    splits in ['easy', 'medium', 'hard']
    """
    if not test_new:
        dataset = load_dataset("viddiff/VidDiffBench_2", cache_dir=cache_dir)
        dataset = dataset['test']
        valid_splits = set(dataset['split'])
    else:
        dataset = load_dataset("viddiff/VidDiffBench_2", cache_dir=cache_dir)
        dataset = dataset['test']
        dataset = dataset.map(lambda example: example.update({'split': example['domain']}) or example)
        valid_splits = set(dataset['split'])

    def _filter_splits(example):
        return example["split"] in splits

    dataset = dataset.filter(_filter_splits)
    if len(dataset) == 0:
        raise ValueError(
            f"Dataset empty for splits {splits}. Valid splits {valid_splits}")

    def _map_elements_to_json(example):
        example["videos"] = json.loads(example["videos"])
        example["differences_annotated"] = json.loads(
            example["differences_annotated"])
        example["differences_gt"] = json.loads(example["differences_gt"])
        return example

    dataset = dataset.map(_map_elements_to_json)
    # dataset = dataset.map(_clean_annotations)
    dataset = apply_subset_mode(dataset, subset_mode)

    dataset = _get_difficulty_splits(dataset)

    return dataset

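# Illustrative note (not part of the original file): after `_map_elements_to_json`,
# the JSON-encoded columns of each row become Python objects, roughly:
#   row['videos']                -> list of two dicts with keys such as 'path', 'fps_original', 'frames_trim'
#   row['differences_annotated'] -> dict of per-difference annotations
#   row['differences_gt']        -> dict of ground-truth labels (None where a difference is unlabeled)
# (field names are inferred from how they are used elsewhere in this file)
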
def _get_difficulty_splits(dataset):
    with open("data/lookup_action_to_split.json", "r") as fp:
        lookup_action_to_split = json.load(fp)

    def add_split_difficulty(example):
        example['split_difficulty'] = lookup_action_to_split[example['action']]
        return example

    dataset = dataset.map(add_split_difficulty)
    return dataset

def load_all_videos(dataset,
                    cache=True,
                    cache_dir="cache/cache_data",
                    overwrite_cache=False,
                    test_samevideo=0,
                    test_flipvids=0,
                    do_tqdm=True):
    """
    Return a 2-element tuple. Each element is a list of length len(dataset).
    The first list holds video A for each datapoint as a dict with elements:
        path: original path to the video
        fps: frames per second
        video: numpy array of the video with shape (nframes, H, W, 3)
    The second list is the same but for video B.

    Args:
        cache_dir (str): Directory to store cached video data. Defaults to "cache/cache_data".
    """

    all_videos = ([], [])
    # make the iterator, with or without tqdm based on `do_tqdm`
    if do_tqdm:
        it = tqdm(dataset)
    else:
        it = dataset

    # load each video
    for row in it:
        videos = get_video_data(row['videos'],
                                cache=cache,
                                cache_dir=cache_dir,
                                overwrite_cache=overwrite_cache)

        video0, video1 = videos[0], videos[1]

        if test_flipvids:
            video0, video1 = video1, video0

        if not test_samevideo:
            all_videos[0].append(video0)
            all_videos[1].append(video1)
        else:
            all_videos[0].append(video1)
            all_videos[1].append(video1)

    return all_videos

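# Illustrative access pattern (assumption, not in the original file):
#   videos_a, videos_b = load_all_videos(dataset)
#   videos_a[i]['video']  # np.ndarray of shape (nframes, H, W, 3) for sample i, video A
#   videos_a[i]['fps']    # frames per second (no downsampling is applied here)
#   videos_a[i]['path']   # original path to the video on disk
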
def _clean_annotations(example):
    # Not all differences in the taxonomy have a label available, so filter out the unlabeled ones.

    differences_gt_labeled = {
        k: v
        for k, v in example['differences_gt'].items() if v is not None
    }
    differences_annotated = {
        k: v
        for k, v in example['differences_annotated'].items()
        if k in differences_gt_labeled.keys()
    }

    # Directly assign to the example without deepcopy
    example['differences_gt'] = differences_gt_labeled
    example['differences_annotated'] = differences_annotated

    return example

def get_video_data(videos: dict, cache=True, cache_dir="cache/cache_data", overwrite_cache=False):
    """
    Pass in the videos dictionary from the dataset, like dataset[idx]['videos'].
    Load the 2 videos represented as numpy arrays.
    By default, cache the arrays so that the second time through, the dataset
    loading will be faster.

    returns: a list [video_dict0, video_dict1], one dict per video
    """
    video_dicts = []

    for i in [0, 1]:
        path = videos[i]['path']
        assert Path(path).exists(
        ), f"Video not downloaded [{path}]\nCheck dataset README about downloading videos"
        frames_trim = slice(*videos[i]['frames_trim'])

        video_dict = videos[i].copy()

        if cache:
            dir_cache = Path(cache_dir)
            dir_cache.mkdir(exist_ok=True, parents=True)
            hash_key = get_hash_key(path + str(frames_trim))
            memmap_filename = dir_cache / f"memmap_{hash_key}.npy"

            # if already cached and not overwriting, load the memmapped video and skip decoding
            if os.path.exists(memmap_filename) and not overwrite_cache:
                video_info = np.load(f"{memmap_filename}.info.npy",
                                     allow_pickle=True).item()
                video = np.memmap(memmap_filename,
                                  dtype=video_info['dtype'],
                                  mode='r',
                                  shape=video_info['shape'])
                video_dict['video'] = video
                video_dict['fps'] = video_dict['fps_original']  # since we don't downsample here
                video_dicts.append(video_dict)
                continue

        is_dir = Path(path).is_dir()
        if is_dir:
            video = _load_video_from_directory_of_images(
                path, frames_trim=frames_trim)

        else:
            assert Path(path).suffix in (".mp4", ".mov")
            video, fps = _load_video(path, frames_trim=frames_trim)
            assert fps == videos[i]['fps_original']

        if cache:
            np.save(f"{memmap_filename}.info.npy", {
                'shape': video.shape,
                'dtype': video.dtype
            })
            memmap = np.memmap(memmap_filename,
                               dtype=video.dtype,
                               mode='w+',
                               shape=video.shape)
            memmap[:] = video[:]
            memmap.flush()
            video = memmap

        video_dict['video'] = video
        video_dict['fps'] = video_dict['fps_original']
        video_dicts.append(video_dict)

    return video_dicts

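# Cache layout sketch (inferred from get_video_data above, not part of the original file):
# each (path, frames_trim) pair is hashed with get_hash_key and two files are written
# under cache_dir:
#   memmap_<hash>.npy           raw frame data, re-opened read-only with np.memmap
#   memmap_<hash>.npy.info.npy  pickled dict holding the array's 'shape' and 'dtype'
# Pass overwrite_cache=True (or delete these files) to force a video to be re-decoded.
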
def _load_video(f, return_fps=True, frames_trim: slice = None) -> np.ndarray:
    """
    mp4 video to a frames numpy array of shape (N,H,W,3).
    Do not use for long videos.
    frames_trim: (s,e) are the start and end int frames to include (warning: the range
    is inclusive, unlike in list indexing).
    """
    vid = decord.VideoReader(str(f))
    fps = vid.get_avg_fps()

    if len(vid) > 50000:
        raise ValueError(
            "Video probably has too many frames to convert to a numpy array")

    if frames_trim is None:
        frames_trim = slice(0, None, None)
    video_np = vid[frames_trim].asnumpy()

    if not return_fps:
        return video_np
    else:
        assert fps > 0
        return video_np, fps

def _load_video_from_directory_of_images(
    path_dir: str,
    frames_trim: slice = None,
    downsample_time: int = None,
) -> np.ndarray:
    """
    `path_dir` is a directory path with images that, when arranged in alphabetical
    order, make a video.
    This function returns a numpy array of shape (N,H,W,3) where N is the
    number of frames.
    """
    files = sorted(os.listdir(path_dir))

    if frames_trim is not None:
        files = files[frames_trim]

    if downsample_time is not None:
        files = files[::downsample_time]

    files = [f"{path_dir}/{f}" for f in files]
    images = [Image.open(f) for f in files]

    video_array = np.stack(images)

    return video_array

def _subsample_video(video: np.ndarray,
                     fps_original: int,
                     fps_target: int,
                     fps_warning: bool = True):
    """
    video: video as a numpy array (nframes, h, w, 3)
    fps_original: original fps of the video
    fps_target: target fps to downscale to
    fps_warning: if True, then log warnings to the logger if the target fps is
        higher than the original fps, or if the target fps isn't possible because
        it isn't divisible by the original fps.
    """
    subsample_time = fps_original / fps_target

    if subsample_time < 1 and fps_warning:
        logging.warning(
            f"Trying to subsample frames to fps {fps_target}, which is higher than "
            f"the fps of the original video, {fps_original}. The video fps won't be changed. "
            "\nSuppress this warning by setting config fps_warning=False")
        return video, fps_original, 1

    subsample_time_int = int(subsample_time)
    fps_new = int(fps_original / subsample_time_int)
    if fps_new != fps_target and fps_warning:
        logging.warning(
            f"Config lmm.fps='{fps_target}' but the original fps is {fps_original}, "
            f"so we downscale to fps {fps_new} instead. "
            "\nSuppress this warning by setting config fps_warning=False")

    video = video[::subsample_time_int]

    return video, fps_new, subsample_time_int

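# Worked example (illustrative): for fps_original=30 and fps_target=8,
# subsample_time = 30 / 8 = 3.75, so subsample_time_int = 3, the video is sliced
# as video[::3], and fps_new = int(30 / 3) = 10 != 8, which triggers the warning above.
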
def downsample_videos(dataset, videos, args_fps_inference, fps_warning=True):
    """Downsample each loaded video to its domain's inference fps. Only called by viddiff_method.run_viddiff.py"""
    for i in range(len(dataset)):
        row = dataset[i]
        domain = row['domain']
        fps_inference = args_fps_inference[domain]
        video0, video1 = videos[0][i], videos[1][i]
        for video in (video0, video1):
            video['video'], fps_new, subsample_time_int = _subsample_video(
                video['video'], video['fps_original'], fps_inference, fps_warning)
            video['fps'] = fps_new

    return videos


def apply_subset_mode(dataset, subset_mode):
    """
    For example, if subset_mode is "3_per_action" then just get the first 3 rows
    for each unique action.
    Useful for working with subsets.
    """
    match = re.match(r"(\d+)_per_action", subset_mode)
    if match:
        instances_per_action = int(match.group(1))
        action_counts = {}
        subset_indices = []

        for idx, example in enumerate(dataset):
            action = example['action']
            if action not in action_counts:
                action_counts[action] = 0

            if action_counts[action] < instances_per_action:
                subset_indices.append(idx)
                action_counts[action] += 1

        return dataset.select(subset_indices)
    else:
        return dataset

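# Usage sketch (illustrative): apply_subset_mode(dataset, "3_per_action") keeps only
# the first 3 rows per unique 'action'; any subset_mode that does not match
# r"(\d+)_per_action" (e.g. the default "0") returns the dataset unchanged.
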
def get_hash_key(key: str) -> str:
    return hashlib.sha256(key.encode()).hexdigest()


def dataset_metrics(dataset):
    import pandas as pd
    df = pd.DataFrame(dataset)
    print("Number of actions")
    print(df.groupby(['split'])['action'].nunique())
    print("Total actions", df['action'].nunique())

    print("Samples by category")
    print(df.groupby(["split"])['split'].count())
    print("Total ", len(df))
    print()

    diffs = []
    for row in dataset:
        diff = {
            k: v
            for k, v in row['differences_gt'].items() if v is not None
        }
        diffs.append(diff)
    cnts = [len(d) for d in diffs]
    df['variation_cnts'] = cnts
    print("Variation counts by category")
    print(df.groupby(['split'])['variation_cnts'].sum())
    print("total ", df['variation_cnts'].sum())

    print()


if __name__ == "__main__":
    # these are the 3 data loading commands
    splits = ['ballsports', 'fitness', 'diving', 'music', 'surgery']
    dataset = load_viddiff_dataset(splits=splits)
    metrics = dataset_metrics(dataset)

    videos = load_all_videos(dataset)
    n_differences = lvd.get_n_differences(dataset, "data/n_differences.json")
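
# Typical external usage (an assumption; it mirrors the `lvd` alias referenced above,
# and is not part of the committed file):
#   import load_viddiff_dataset as lvd
#   dataset = lvd.load_viddiff_dataset(splits=["easy"])
#   videos = lvd.load_all_videos(dataset)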