bulk_run.py
import os
import argparse
import sys
import pandas as pd
from tqdm import tqdm
from bson import json_util as json
from src import clusterize, database
from src.tracker import Tracker
from src.classifier import FacerecClassifier
from src.utils import uri_utils
os.makedirs('database', exist_ok=True)
database.init()
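
# Expected CSV layout, inferred from the column accesses in main() below
# (the exact header names are an assumption, not a documented schema):
#   remote media rows:  media[,start,end,type]
#   local file rows:    Name,kgURI[,start,end,type]
# Rows whose 'type' column is present and different from 'VIDEO' are skipped.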
def main(input, project, speed=25, skip_tracking=False):
    if not skip_tracking:
        # TODO check if input is a csv or a folder
        df = pd.read_csv(input)
        tr = Tracker(project=project)
        all_results = []
        v = None
        old = None
        for i, x in tqdm(df.iterrows(), total=len(df)):
            # skip non-video rows
            if 'type' in x and x['type'] != 'VIDEO':
                continue
            start = int(x['start']) if 'start' in x else None
            end = int(x['end']) if 'end' in x else 10000
            fragment = f'{start},{end + 1}' if start is not None else None
            if 'media' in x:
                media = x['media']
                video_id = media
                # resolve the media URI only when it differs from the previous row
                if media != old:
                    v, metadata = uri_utils.uri2video(media)
                    database.save_metadata(metadata)
                old = media
            else:
                # local file referenced by name, identified through its knowledge-graph URI
                v = './video/' + x['Name']
                video_id = x['kgURI']
                _, metadata = uri_utils.uri2video(video_id)
                video_id = metadata['locator']
                database.save_metadata(metadata)
            database.clean_analysis(video_id, project)
            database.save_status(video_id, project, 'RUNNING')
            res = tr.run(v, export_frames=True, fragment=fragment, video_id=video_id,
                         video_speedup=speed, verbose=False, cluster_features=False)
            all_results.append(res)
        with open(f'results_{project}.json', 'w') as f:
            f.write(json.dumps(all_results))
    else:
        with open(f'results_{project}.json', 'r') as f:
            all_results = json.loads(f.read())

    clusters = []
    for r in all_results:
        c = clusterize.main(clusterize.from_dict(r), dominant_ratio=0.6, weighted_dominant_ratio=0.4,
                            confidence_threshold=0.6, merge_cluster=True, min_length=1)
        clusters.append(c)
    with open(f'results_{project}_clusters.json', 'w') as f:
        f.write(json.dumps(clusters))
def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='The CSV containing the videos to analyse.')
    parser.add_argument('--project', type=str, default='general',
                        help='Name of the collection the videos belong to.')
    parser.add_argument('--speed', type=int, default=25,
                        help='Speed-up factor for the video analysis.')
    parser.add_argument('--skip_tracking', action='store_true', default=False,
                        help='Skip tracking and only recompute the clustering.')
    return parser.parse_args(argv)


if __name__ == '__main__':
    args = parse_arguments(sys.argv[1:])
    main(args.input, args.project, args.speed, args.skip_tracking)
# python bulk_run.py -i evaluation/dataset_antract.csv --project antract
# python bulk_run.py -i evaluation/dataset_memad.csv --project memad
# python bulk_run.py -i evaluation/dataset_memad.csv --project memad_gt
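
# A minimal sketch for loading a produced clusters file for inspection, using
# bson's json_util to match how the file is written above (the filename shown
# corresponds to the antract example run):
#   from bson import json_util as json
#   with open('results_antract_clusters.json') as f:
#       clusters = json.loads(f.read())
#   print(len(clusters), 'videos clustered')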