# metrics.py (forked from CrypticSignal/video-quality-metrics)
import json
import os

import numpy as np

from utils import force_decimal_places, line, Logger, plot_graph, get_metrics_list

log = Logger("save_metrics")
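
# For reference, an illustrative sketch of the libvmaf JSON log parsed below
# (the key names match those accessed in this function; which per-frame metrics
# appear depends on the features libvmaf was run with):
#
# {
#     "frames": [
#         {
#             "frameNum": 0,
#             "metrics": {
#                 "vmaf": 93.5,
#                 "psnr_y": 42.1,
#                 "float_ssim": 0.98,
#                 "float_ms_ssim": 0.99
#             }
#         },
#         ...
#     ]
# }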

def get_metrics_save_table(
    comparison_table,
    json_file_path,
    args,
    decimal_places,
    data_for_current_row,
    table,
    output_folder,
    time_taken,
    crf_or_preset=None,
):
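    """Parse a libvmaf JSON log, plot a graph for each requested metric,
    append a row of scores to the comparison table and write the table to disk.

    Returns the mean VMAF score across all frames as a float.
    """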
    with open(json_file_path, "r") as f:
        file_contents = json.load(f)

    frames = file_contents["frames"]
    frame_numbers = [frame["frameNum"] for frame in frames]

    # Maps each metric type to the corresponding key in the libvmaf JSON log.
    metric_lookup = {
        "VMAF": "vmaf",
        "PSNR": "psnr_y",
        "SSIM": "float_ssim",
        "MS-SSIM": "float_ms_ssim",
    }
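    # Key naming follows libvmaf's conventions: "psnr_y" is PSNR computed on the
    # luma (Y) plane only, and "float_ssim"/"float_ms_ssim" are the
    # floating-point SSIM and MS-SSIM feature extractors.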
    # Only used for accessing the VMAF mean score to return at the end of this function.
    collected_scores = {}

    # Process the scores for each requested metric type.
    metrics_list = get_metrics_list(args)

    for metric_type in metrics_list:
        metric_key = metric_lookup[metric_type]

        # Skip any metric that libvmaf did not compute (indexing the key directly
        # would raise a KeyError if it is absent from the log).
        if metric_key in frames[0]["metrics"]:
            # Get the <metric_type> score of each frame from the JSON file created by libvmaf.
            metric_scores = [frame["metrics"][metric_key] for frame in frames]

            # Calculate the mean, minimum and standard deviation of the scores across
            # all frames. force_decimal_places formats each value as a string, which
            # is why the mean is cast back to a float before being returned.
            mean_score = force_decimal_places(np.mean(metric_scores), decimal_places)
            min_score = force_decimal_places(min(metric_scores), decimal_places)
            std_score = force_decimal_places(np.std(metric_scores), decimal_places)

            collected_scores[metric_type] = {
                "min": min_score,
                "std": std_score,
                "mean": mean_score,
            }
            log.info(f"Creating {metric_type} graph...")
            plot_graph(
                f"{metric_type}\nn_subsample: {args.subsample}",
                "Frame Number",
                metric_type,
                frame_numbers,
                metric_scores,
                mean_score,
                os.path.join(output_folder, metric_type),
            )

            # Add the <metric_type> values to the table in "Min | Standard Deviation | Mean" format.
            data_for_current_row.append(f"{min_score} | {std_score} | {mean_score}")
    # In transcoding mode, the CRF/preset and the time taken to transcode are
    # the first two columns of the row.
    if not args.no_transcoding_mode:
        data_for_current_row.insert(0, crf_or_preset)
        data_for_current_row.insert(1, time_taken)

    table.add_row(data_for_current_row)

    collected_metric_types = "/".join(metrics_list)
    table_title = (
        f"{collected_metric_types} values are in the format: Min | Standard Deviation | Mean"
    )

    # Write the table to the comparison table file.
    with open(comparison_table, "w") as f:
        f.write(f"{table_title}\n")
        f.write(table.get_string())

    log.info(f"{comparison_table} has been updated.")
    line()
    return float(collected_scores["VMAF"]["mean"])
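
# A minimal usage sketch (hypothetical, not part of the original file). `table`
# is assumed to be a prettytable.PrettyTable (anything providing add_row() and
# get_string() works), and `args` an argparse.Namespace carrying the attributes
# read above; which additional attributes matter is decided by
# get_metrics_list(args), so the values shown here are illustrative only.
#
# from argparse import Namespace
# from prettytable import PrettyTable
#
# args = Namespace(subsample=1, no_transcoding_mode=False)
# table = PrettyTable(["CRF", "Time Taken", "VMAF"])
# mean_vmaf = get_metrics_save_table(
#     "Table.txt", "vmaf_log.json", args, 2, [], table, "output", "12.3s",
#     crf_or_preset=23,
# )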