From a34037e0c03b309ad7103b17cb20aaf6a64dd967 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:02:06 +0800 Subject: [PATCH 01/79] feat: initial impl of taking photos automatically for all gst sources --- .../base/bin/camera_test_auto_gst_source.py | 227 ++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100755 providers/base/bin/camera_test_auto_gst_source.py diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py new file mode 100755 index 0000000000..6c09797b51 --- /dev/null +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -0,0 +1,227 @@ +#! /usr/bin/python3 + +import sys +import gi +from argparse import ArgumentParser +import typing as T +import re + +# https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py + +gi.require_version("Gst", "1.0") +from gi.repository import Gst # type: ignore + +gi.require_version("GLib", "2.0") +from gi.repository import GLib # type: ignore + +Gst.init(None) + + +def get_devices() -> T.List[Gst.Device]: + monitor = Gst.DeviceMonitor.new() # type: Gst.DeviceMonitor + monitor.add_filter("Video/Source") + monitor.start() + + devices = monitor.get_devices() + + monitor.stop() + return devices + + +def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: + """Gets all the fixated(unique) caps from a Gst.Caps object + + :param caps: A mixed Gst.Caps + """ + fixated_caps = [] + for cap_str in caps.to_string().split(";"): + mixed_caps = Gst.Caps.from_string(cap_str) + while not mixed_caps.is_fixed(): + # keep fixiating it until it's fixed + fixated_cap = mixed_caps.fixate() + fixated_caps.append(fixated_cap) + mixed_caps = mixed_caps.subtract(fixated_cap) + # this is useful to get around missing types + # in default gst python binding on ubuntu, like Gst.Fraction + fixated_caps.append(mixed_caps) # append tha final one + + return fixated_caps + + +def pipeline_add_many(pipeline: Gst.Pipeline, *elements: Gst.Element): + for elem in elements: + pipeline.add(elem) + + +def element_link_many(*elements: Gst.Element): + elem_list = list(elements) + for i in range(len(elem_list) - 1): + assert Gst.Element.link(elem_list[i], elem_list[i + 1]), "not linked!" 
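# --- Illustrative aside (not part of the patch): get_all_fixated_caps above
# enumerates every concrete format hidden in a mixed Gst.Caps by repeatedly
# fixating and subtracting. A minimal, self-contained sketch of that loop on a
# hand-written caps string; the caps values are made up for illustration and
# assume the same GStreamer 1.0 Python bindings the script imports.
import gi

gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

mixed = Gst.Caps.from_string(
    "video/x-raw,format={ YUY2, NV12 },width=640,height=480"
)
while not mixed.is_fixed():
    fixed = mixed.fixate()          # pick one concrete value per field
    print(fixed.to_string())        # e.g. video/x-raw, format=(string)YUY2, ...
    mixed = mixed.subtract(fixed)   # drop it and continue with what is left
print(mixed.to_string())            # the last remainder is already fixed here
# --- end of aside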
+ + +def element_to_str( + element: Gst.Element, + exclude=["parent"], + simple_elem=["jpegdec", "videoconvert", "videorate", "multifilesink"], +) -> str: + """Stringifies the given element + + :param element: which element to convert to str + :param exclude: which property names to exclude, defaults to ["parent"] + :return: string usable in gst-launch-1.0 + """ + properties = element.list_properties() # list[GObject.GParamSpec] + element_name = element.get_factory().get_name() + + if element_name in simple_elem: + return element_name + + prop_strings = [] # type: list[str] + for prop in properties: + if prop.name in exclude: + continue + prop_value = element.get_property(prop.name) + if hasattr(prop_value, "to_string"): + # sometimes we have a nice to_string method, prioritize this + prop_strings.append( + "{}={}".format(prop.name, prop_value.to_string()) + ) + else: + prop_strings.append( + "{}={}".format(prop.name, str(prop_value)) + ) # handle native python types + return "{} {}".format( + element_name, " ".join(prop_strings) + ) # libcamerasrc name=cam_name location=p.jpeg + + +def parse_args(): + parser = ArgumentParser() + + +def take_photo( + source: Gst.Element, + *, + caps: T.Optional[Gst.Caps] = None, + filename="/home/fgfg/photo", + delay_seconds=0, +): + # key is the name, value is the element. Ordered + elements = { + "source-capsfilter": Gst.ElementFactory.make( + "capsfilter", "source-capsfilter" + ), + "decoder": Gst.ElementFactory.make("decodebin"), + "videoconvert": Gst.ElementFactory.make("videoconvert"), + "videorate": Gst.ElementFactory.make("videorate"), + "video-rate-capsfilter": Gst.ElementFactory.make( + "capsfilter", "video-rate-capsfilter" + ), + "jpegenc": Gst.ElementFactory.make("jpegenc"), + "multifilesink": Gst.ElementFactory.make("multifilesink"), + } # type: dict[str, Gst.Element] + assert all(element is not None for element in elements.values()) + + # set properties + elements["multifilesink"].set_property( + "location", "{}.jpeg".format(filename) + ) + + if caps: + elements["source-capsfilter"].set_property("caps", caps) + # structure 0 is guaranteed to exist + mime_type = caps.get_structure(0).get_name() # type: str + if mime_type == "image/jpeg": + elements["decoder"] = Gst.ElementFactory.make("jpegdec") + assert elements["decoder"] is not None + elif mime_type == "video/x-raw": + del elements["decoder"] + else: + del elements["source-capsfilter"] + + if delay_seconds > 0 and caps: + elements["video-rate-capsfilter"].set_property( + "caps", + Gst.Caps.from_string( + "video/x-raw,framerate=1/{}".format(delay_seconds) + ), + ) + # framerate=(fraction)30/1 + framerate_match = re.search( + r"framerate=\(fraction\)(\d+)\/1", caps.to_string() + ) + if framerate_match: + num_buffers = delay_seconds * int(framerate_match.group(1)) + source.set_property("num-buffers", num_buffers) + print("Dynamically computed num-buffers={}".format(num_buffers)) + else: + source.set_property("num-buffers", 60) + print( + "Non standard framerate object: {}".format(caps.to_string()), + "Defaulting to 60 buffers", + file=sys.stderr, + ) + + else: + del elements["source-capsfilter"] + del elements["video-rate-capsfilter"] + + # link elements and create pipeline + pipeline = Gst.Pipeline() # type: Gst.Pipeline + # add many does not exist in default ubuntu gst python binding + pipeline_add_many(pipeline, source, *elements.values()) + element_link_many(source, *elements.values()) + + # print("Created pipeline") + # for elem in source, *elements.values(): + # 
print(element_to_str(elem)) + # print() + + main_loop = GLib.MainLoop.new( # type: GLib.MainLoop + None, False # type: ignore + ) + + def eos_handler(_, message: Gst.Message): + nonlocal pipeline + if message.type == Gst.MessageType.EOS: + print("We reached EOS!") + # use closure here since this function must take 2 parameters + # none of which can be the pipeline + pipeline.set_state(Gst.State.NULL) + main_loop.quit() + + bus = pipeline.get_bus() + bus.add_signal_watch() + bus.connect("message", eos_handler) + + print("Setting playing state") + pipeline.set_state(Gst.State.PLAYING) + + main_loop.run() + while main_loop.is_running(): + pass + + +def main(): + devices = get_devices() + for di, device in enumerate(devices): + caps = device.get_caps() + for ci, cap in enumerate(get_all_fixated_caps(caps)): + print( + "Testing", + cap.to_string(), + "for device", + device.get_display_name(), + ) + take_photo( + device.create_element(), + delay_seconds=1, + caps=cap, + filename="/home/zhongning/fgfg/photo_dev{}_cap{}".format( + di, ci + ), + ) + + +if __name__ == "__main__": + main() From b0050dbd8cca83f137c97efc8734cb2b21648b21 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 21 Nov 2024 10:27:07 +0800 Subject: [PATCH 02/79] fix: avoid creating a new main loop over and over --- .../base/bin/camera_test_auto_gst_source.py | 59 +++++++++++++------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 6c09797b51..6525456806 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -15,6 +15,9 @@ from gi.repository import GLib # type: ignore Gst.init(None) +main_loop = GLib.MainLoop.new( # type: GLib.MainLoop + None, False # type: ignore +) def get_devices() -> T.List[Gst.Device]: @@ -128,6 +131,11 @@ def take_photo( ) if caps: + assert ( + caps.is_fixed() + ), '"{}" is not fixed. 
If caps.fixate was called before, then this is a bug in GStreamer'.format( + caps.to_string() + ) elements["source-capsfilter"].set_property("caps", caps) # structure 0 is guaranteed to exist mime_type = caps.get_structure(0).get_name() # type: str @@ -135,8 +143,11 @@ def take_photo( elements["decoder"] = Gst.ElementFactory.make("jpegdec") assert elements["decoder"] is not None elif mime_type == "video/x-raw": + # don't need a decoder for raw del elements["decoder"] + # else case is using decodebin as a fallback else: + # remove the initial capsfilter if unused del elements["source-capsfilter"] if delay_seconds > 0 and caps: @@ -146,18 +157,25 @@ def take_photo( "video/x-raw,framerate=1/{}".format(delay_seconds) ), ) - # framerate=(fraction)30/1 + # framerate=(fraction)30/1, we can assume this format + # because caps is fixated framerate_match = re.search( r"framerate=\(fraction\)(\d+)\/1", caps.to_string() ) - if framerate_match: + if framerate_match is not None: num_buffers = delay_seconds * int(framerate_match.group(1)) source.set_property("num-buffers", num_buffers) - print("Dynamically computed num-buffers={}".format(num_buffers)) + print( + "[ INFO ] Dynamically computed num-buffers={} to delay {} seconds".format( + num_buffers, delay_seconds + ) + ) else: source.set_property("num-buffers", 60) print( - "Non standard framerate object: {}".format(caps.to_string()), + "[ ERR ] Non standard framerate object: {}".format( + caps.to_string() + ), "Defaulting to 60 buffers", file=sys.stderr, ) @@ -168,23 +186,18 @@ def take_photo( # link elements and create pipeline pipeline = Gst.Pipeline() # type: Gst.Pipeline - # add many does not exist in default ubuntu gst python binding + # Gst.Pipeline.add_many and Gst.Element.link_many + # do not exist in default ubuntu gst python binding pipeline_add_many(pipeline, source, *elements.values()) element_link_many(source, *elements.values()) - # print("Created pipeline") - # for elem in source, *elements.values(): - # print(element_to_str(elem)) - # print() - - main_loop = GLib.MainLoop.new( # type: GLib.MainLoop - None, False # type: ignore - ) + print("Created pipeline: ") + print(" ! 
".join(element_to_str(e) for e in (source, *elements.values()))) def eos_handler(_, message: Gst.Message): nonlocal pipeline if message.type == Gst.MessageType.EOS: - print("We reached EOS!") + print("[ OK ] We reached EOS!") # use closure here since this function must take 2 parameters # none of which can be the pipeline pipeline.set_state(Gst.State.NULL) @@ -204,21 +217,29 @@ def eos_handler(_, message: Gst.Message): def main(): devices = get_devices() - for di, device in enumerate(devices): + if len(devices) == 0: + print( + "GStreamer cannot find any cameras on this device.", + file=sys.stderr, + ) + exit(1) + + print("Found {} cameras!".format(len(devices))) + for dev_i, device in enumerate(devices): caps = device.get_caps() - for ci, cap in enumerate(get_all_fixated_caps(caps)): + for cap_i, capability in enumerate(get_all_fixated_caps(caps)): print( "Testing", - cap.to_string(), + capability.to_string(), "for device", device.get_display_name(), ) take_photo( device.create_element(), delay_seconds=1, - caps=cap, + caps=capability, filename="/home/zhongning/fgfg/photo_dev{}_cap{}".format( - di, ci + dev_i, cap_i ), ) From 9b59700d59772b2c70ee841880300150138f4c92 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 21 Nov 2024 14:06:06 +0800 Subject: [PATCH 03/79] fix: missing handling for deocdebin linking --- .../base/bin/camera_test_auto_gst_source.py | 182 ++++++++++++------ 1 file changed, 126 insertions(+), 56 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 6525456806..4fdabba131 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -1,6 +1,10 @@ #! 
/usr/bin/python3 +# import time +# from checkbox_support.helpers.timeout import timeout +from enum import Enum import sys +import time import gi from argparse import ArgumentParser import typing as T @@ -20,6 +24,58 @@ ) +class ElementPrinter: + included_properties = { + "capsfilter": ("caps",), + "multifilesink": ("location",), + } + global_exclude_keys = ("parent", "client-name") + simple_elements = ("videoconvert", "decodebin", "videorate", "jpegenc") + + @staticmethod + def print(element: Gst.Element): + properties = element.list_properties() # list[GObject.GParamSpec] + element_name = element.get_factory().get_name() + + if element_name in ElementPrinter.simple_elements: + return element_name + + print_full = element_name not in ElementPrinter.included_properties + + prop_strings = [] # type: list[str] + for prop in properties: + if ( + not print_full + and prop.name + not in ElementPrinter.included_properties[element_name] + ): + continue + if prop.name in ElementPrinter.global_exclude_keys: + continue + prop_value = element.get_property(prop.name) + if hasattr(prop_value, "to_string"): + # sometimes we have a nice to_string method, prioritize this + prop_strings.append( + "{}={}".format(prop.name, prop_value.to_string()) + ) + elif prop.name == "caps": + # need to double quote the caps + prop_strings.append( + '{}="{}"'.format(prop.name, prop_value.to_string()) + ) + elif type(prop_value) is Enum: + prop_strings.append( + "{}={}".format(prop.name, prop_value.value) + ) + else: + prop_strings.append( + "{}={}".format(prop.name, str(prop_value)) + ) # handle native python types + return "{} {}".format( + element_name, " ".join(prop_strings) + ) # libcamerasrc name=cam_name location=p.jpeg + + def get_devices() -> T.List[Gst.Device]: monitor = Gst.DeviceMonitor.new() # type: Gst.DeviceMonitor monitor.add_filter("Video/Source") @@ -51,55 +107,54 @@ def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: return fixated_caps -def pipeline_add_many(pipeline: Gst.Pipeline, *elements: Gst.Element): +def pipeline_add_many( + pipeline: Gst.Pipeline, elements: T.Iterable[Gst.Element] +): for elem in elements: pipeline.add(elem) -def element_link_many(*elements: Gst.Element): +def element_link_many(elements: T.Iterable[Gst.Element]): elem_list = list(elements) for i in range(len(elem_list) - 1): - assert Gst.Element.link(elem_list[i], elem_list[i + 1]), "not linked!" 
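# --- Illustrative aside (not part of the patch): the element_link_many rework
# in this hunk falls back to decodebin's "pad-added" signal, because decodebin
# only creates its source pad after it has identified the incoming stream. A
# minimal, self-contained sketch of that pattern; videotestsrc, jpegenc and
# fakesink are stand-ins picked for illustration and assume the stock
# gst-plugins-base/-good elements are installed.
import gi

gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

pipeline = Gst.Pipeline.new("pad-added-demo")
src = Gst.ElementFactory.make("videotestsrc")
enc = Gst.ElementFactory.make("jpegenc")
dec = Gst.ElementFactory.make("decodebin")
sink = Gst.ElementFactory.make("fakesink")
src.set_property("num-buffers", 30)  # stop by itself after 30 frames

for elem in (src, enc, dec, sink):
    pipeline.add(elem)
src.link(enc)
enc.link(dec)  # decodebin's sink pad is static, so this link works right away


def on_pad_added(decodebin, new_pad):
    # called once decodebin has plugged a decoder (jpegdec here) and exposed
    # a source pad; only now can the downstream branch be connected
    sink_pad = sink.get_static_pad("sink")
    if not sink_pad.is_linked():
        new_pad.link(sink_pad)


dec.connect("pad-added", on_pad_added)

pipeline.set_state(Gst.State.PLAYING)
# wait for EOS (or an error) from the 30 test frames, then clean up
pipeline.get_bus().timed_pop_filtered(
    10 * Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR
)
pipeline.set_state(Gst.State.NULL)
# --- end of aside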
- - -def element_to_str( - element: Gst.Element, - exclude=["parent"], - simple_elem=["jpegdec", "videoconvert", "videorate", "multifilesink"], -) -> str: - """Stringifies the given element - - :param element: which element to convert to str - :param exclude: which property names to exclude, defaults to ["parent"] - :return: string usable in gst-launch-1.0 - """ - properties = element.list_properties() # list[GObject.GParamSpec] - element_name = element.get_factory().get_name() - - if element_name in simple_elem: - return element_name - - prop_strings = [] # type: list[str] - for prop in properties: - if prop.name in exclude: - continue - prop_value = element.get_property(prop.name) - if hasattr(prop_value, "to_string"): - # sometimes we have a nice to_string method, prioritize this - prop_strings.append( - "{}={}".format(prop.name, prop_value.to_string()) - ) - else: - prop_strings.append( - "{}={}".format(prop.name, str(prop_value)) - ) # handle native python types - return "{} {}".format( - element_name, " ".join(prop_strings) - ) # libcamerasrc name=cam_name location=p.jpeg + e1, e2 = elem_list[i], elem_list[i + 1] + is_linked = Gst.Element.link(e1, e2) + if not is_linked: + if e1.get_factory().get_name() == "decodebin": + e2_copy = e2 # force a reference copy + + def on_pad_added( + decodebin: Gst.Element, decodebin_src: Gst.Pad + ): + nonlocal e2_copy + print("\n\ndecode bin pad added\n\n") + e2_name = e2_copy.get_factory().get_name() + e2_sink = e2_copy.get_static_pad("sink") + assert e2_sink, "Null sink" + assert decodebin.link(e2_copy) + print("Linked decodebin to {}".format(e2_name)) + + e1.connect("pad-added", on_pad_added) + else: + raise RuntimeError( + "{} and {} could not be linked!".format( + ElementPrinter.print(e1), + ElementPrinter.print(e2), + ) + ) def parse_args(): - parser = ArgumentParser() + p = ArgumentParser() + p.add_argument( + "-s", + "--wait-seconds", + type=int, + help="Number of seconds to keep the pipeline running " + "before taking the photo. Default = 1", + default=1, + ) + return p.parse_args() def take_photo( @@ -109,6 +164,15 @@ def take_photo( filename="/home/fgfg/photo", delay_seconds=0, ): + """Take a photo using the source element + + :param source: The camera source element + :param caps: Which capability to use for the source + - If none, no caps filter will be inserted between source and decoder + :param filename: the path to the photo + :param delay_seconds: number of seconds to keep the pipeline running + before taking the photo + """ # key is the name, value is the element. Ordered elements = { "source-capsfilter": Gst.ElementFactory.make( @@ -131,18 +195,15 @@ def take_photo( ) if caps: - assert ( - caps.is_fixed() - ), '"{}" is not fixed. 
If caps.fixate was called before, then this is a bug in GStreamer'.format( - caps.to_string() - ) + assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) elements["source-capsfilter"].set_property("caps", caps) # structure 0 is guaranteed to exist mime_type = caps.get_structure(0).get_name() # type: str if mime_type == "image/jpeg": + # decodebin has funny clock problem with image/jpeg elements["decoder"] = Gst.ElementFactory.make("jpegdec") - assert elements["decoder"] is not None - elif mime_type == "video/x-raw": + assert elements["decoder"] is not None + if mime_type == "video/x-raw": # don't need a decoder for raw del elements["decoder"] # else case is using decodebin as a fallback @@ -166,9 +227,10 @@ def take_photo( num_buffers = delay_seconds * int(framerate_match.group(1)) source.set_property("num-buffers", num_buffers) print( - "[ INFO ] Dynamically computed num-buffers={} to delay {} seconds".format( + "[ INFO ] Dynamically computed", + "num-buffers={} to delay {} seconds".format( num_buffers, delay_seconds - ) + ), ) else: source.set_property("num-buffers", 60) @@ -185,14 +247,15 @@ def take_photo( del elements["video-rate-capsfilter"] # link elements and create pipeline - pipeline = Gst.Pipeline() # type: Gst.Pipeline + final_elements = (source, *elements.values()) + pipeline = Gst.Pipeline() # Gst.Pipeline.add_many and Gst.Element.link_many # do not exist in default ubuntu gst python binding - pipeline_add_many(pipeline, source, *elements.values()) - element_link_many(source, *elements.values()) + pipeline_add_many(pipeline, final_elements) + element_link_many(final_elements) print("Created pipeline: ") - print(" ! ".join(element_to_str(e) for e in (source, *elements.values()))) + print(" ! ".join(ElementPrinter.print(e) for e in final_elements)) def eos_handler(_, message: Gst.Message): nonlocal pipeline @@ -202,6 +265,10 @@ def eos_handler(_, message: Gst.Message): # none of which can be the pipeline pipeline.set_state(Gst.State.NULL) main_loop.quit() + if message.type == Gst.MessageType.ELEMENT: + print(("Element message from bus:"), bus.name) + message_string = message.get_structure().to_string() # type: ignore + print(message_string) bus = pipeline.get_bus() bus.add_signal_watch() @@ -209,13 +276,14 @@ def eos_handler(_, message: Gst.Message): print("Setting playing state") pipeline.set_state(Gst.State.PLAYING) - + print("Playing") main_loop.run() while main_loop.is_running(): pass def main(): + args = parse_args() devices = get_devices() if len(devices) == 0: print( @@ -229,19 +297,21 @@ def main(): caps = device.get_caps() for cap_i, capability in enumerate(get_all_fixated_caps(caps)): print( - "Testing", + "[ INFO ] Testing", capability.to_string(), "for device", device.get_display_name(), ) take_photo( device.create_element(), - delay_seconds=1, + delay_seconds=args.wait_seconds, caps=capability, filename="/home/zhongning/fgfg/photo_dev{}_cap{}".format( dev_i, cap_i ), ) + break + break if __name__ == "__main__": From 8ca0d3b173732cd5fa36c951b381899ff57bcf75 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 21 Nov 2024 14:29:04 +0800 Subject: [PATCH 04/79] feat: simplify caps fixation, add a timeout in case the pipeline hangs --- .../base/bin/camera_test_auto_gst_source.py | 80 ++++++++++--------- 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 4fdabba131..0e02c147a5 100755 --- 
a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -1,15 +1,14 @@ #! /usr/bin/python3 -# import time -# from checkbox_support.helpers.timeout import timeout from enum import Enum import sys -import time import gi from argparse import ArgumentParser import typing as T import re +from checkbox_support.helpers.timeout import timeout + # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py gi.require_version("Gst", "1.0") @@ -29,8 +28,14 @@ class ElementPrinter: "capsfilter": ("caps",), "multifilesink": ("location",), } - global_exclude_keys = ("parent", "client-name") - simple_elements = ("videoconvert", "decodebin", "videorate", "jpegenc") + global_exclude_keys = ("parent", "client-name", "fd") + simple_elements = ( + "videoconvert", + "decodebin", + "videorate", + "jpegenc", + "jpegdec", + ) @staticmethod def print(element: Gst.Element): @@ -53,16 +58,16 @@ def print(element: Gst.Element): if prop.name in ElementPrinter.global_exclude_keys: continue prop_value = element.get_property(prop.name) - if hasattr(prop_value, "to_string"): - # sometimes we have a nice to_string method, prioritize this - prop_strings.append( - "{}={}".format(prop.name, prop_value.to_string()) - ) - elif prop.name == "caps": + if prop.name == "caps": # need to double quote the caps prop_strings.append( '{}="{}"'.format(prop.name, prop_value.to_string()) ) + elif hasattr(prop_value, "to_string"): + # sometimes we have a nice to_string method, prioritize this + prop_strings.append( + "{}={}".format(prop.name, prop_value.to_string()) + ) elif type(prop_value) is Enum: prop_strings.append( "{}={}".format(prop.name, prop_value.value) @@ -93,16 +98,14 @@ def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: :param caps: A mixed Gst.Caps """ fixated_caps = [] - for cap_str in caps.to_string().split(";"): - mixed_caps = Gst.Caps.from_string(cap_str) - while not mixed_caps.is_fixed(): - # keep fixiating it until it's fixed - fixated_cap = mixed_caps.fixate() - fixated_caps.append(fixated_cap) - mixed_caps = mixed_caps.subtract(fixated_cap) - # this is useful to get around missing types - # in default gst python binding on ubuntu, like Gst.Fraction - fixated_caps.append(mixed_caps) # append tha final one + while not caps.is_fixed(): + # keep fixiating it until it's fixed + fixated_cap = caps.fixate() + fixated_caps.append(fixated_cap) + caps = caps.subtract(fixated_cap) + # this is useful to get around missing types + # in default gst python binding on ubuntu, like Gst.Fraction + fixated_caps.append(caps) # append tha final one return fixated_caps @@ -117,29 +120,28 @@ def pipeline_add_many( def element_link_many(elements: T.Iterable[Gst.Element]): elem_list = list(elements) for i in range(len(elem_list) - 1): - e1, e2 = elem_list[i], elem_list[i + 1] - is_linked = Gst.Element.link(e1, e2) + elem1, elem2 = elem_list[i], elem_list[i + 1] + is_linked = Gst.Element.link(elem1, elem2) if not is_linked: - if e1.get_factory().get_name() == "decodebin": - e2_copy = e2 # force a reference copy + if elem1.get_factory().get_name() == "decodebin": + e2_copy = elem2 # force a reference copy def on_pad_added( decodebin: Gst.Element, decodebin_src: Gst.Pad ): - nonlocal e2_copy print("\n\ndecode bin pad added\n\n") e2_name = e2_copy.get_factory().get_name() e2_sink = e2_copy.get_static_pad("sink") assert e2_sink, "Null sink" - assert decodebin.link(e2_copy) + assert decodebin.link(e2_copy), f"cannot link to {e2_name}" print("Linked 
decodebin to {}".format(e2_name)) - e1.connect("pad-added", on_pad_added) + elem1.connect("pad-added", on_pad_added) else: raise RuntimeError( "{} and {} could not be linked!".format( - ElementPrinter.print(e1), - ElementPrinter.print(e2), + ElementPrinter.print(elem1), + ElementPrinter.print(elem2), ) ) @@ -274,12 +276,16 @@ def eos_handler(_, message: Gst.Message): bus.add_signal_watch() bus.connect("message", eos_handler) - print("Setting playing state") - pipeline.set_state(Gst.State.PLAYING) - print("Playing") - main_loop.run() - while main_loop.is_running(): - pass + @timeout(delay_seconds + 5) + def run(): + print("Setting playing state") + pipeline.set_state(Gst.State.PLAYING) + print("Playing") + main_loop.run() + while main_loop.is_running(): + pass + + run() def main(): @@ -310,8 +316,6 @@ def main(): dev_i, cap_i ), ) - break - break if __name__ == "__main__": From e603d8c59f31b3ada56c105d08bd89dbfc3d77c7 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 21 Nov 2024 14:40:56 +0800 Subject: [PATCH 05/79] fix: raise an error if state change failed --- providers/base/bin/camera_test_auto_gst_source.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 0e02c147a5..e13ed580ea 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -267,10 +267,6 @@ def eos_handler(_, message: Gst.Message): # none of which can be the pipeline pipeline.set_state(Gst.State.NULL) main_loop.quit() - if message.type == Gst.MessageType.ELEMENT: - print(("Element message from bus:"), bus.name) - message_string = message.get_structure().to_string() # type: ignore - print(message_string) bus = pipeline.get_bus() bus.add_signal_watch() @@ -280,7 +276,10 @@ def eos_handler(_, message: Gst.Message): def run(): print("Setting playing state") pipeline.set_state(Gst.State.PLAYING) - print("Playing") + if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: + raise RuntimeError("Failed to transition to playing state") + + print("Playing!") main_loop.run() while main_loop.is_running(): pass From 8d861165a41b18c4acb696ee1b2307825e7401fe Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 21 Nov 2024 16:19:17 +0800 Subject: [PATCH 06/79] feat: simplify all static elemnt creation --- .../base/bin/camera_test_auto_gst_source.py | 274 +++++------------- 1 file changed, 77 insertions(+), 197 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index e13ed580ea..8439f63661 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -5,9 +5,6 @@ import gi from argparse import ArgumentParser import typing as T -import re - -from checkbox_support.helpers.timeout import timeout # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py @@ -23,64 +20,6 @@ ) -class ElementPrinter: - included_properties = { - "capsfilter": ("caps",), - "multifilesink": ("location",), - } - global_exclude_keys = ("parent", "client-name", "fd") - simple_elements = ( - "videoconvert", - "decodebin", - "videorate", - "jpegenc", - "jpegdec", - ) - - @staticmethod - def print(element: Gst.Element): - properties = element.list_properties() # list[GObject.GParamSpec] - element_name = 
element.get_factory().get_name() - - if element_name in ElementPrinter.simple_elements: - return element_name - - print_full = element_name not in ElementPrinter.included_properties - - prop_strings = [] # type: list[str] - for prop in properties: - if ( - not print_full - and prop.name - not in ElementPrinter.included_properties[element_name] - ): - continue - if prop.name in ElementPrinter.global_exclude_keys: - continue - prop_value = element.get_property(prop.name) - if prop.name == "caps": - # need to double quote the caps - prop_strings.append( - '{}="{}"'.format(prop.name, prop_value.to_string()) - ) - elif hasattr(prop_value, "to_string"): - # sometimes we have a nice to_string method, prioritize this - prop_strings.append( - "{}={}".format(prop.name, prop_value.to_string()) - ) - elif type(prop_value) is Enum: - prop_strings.append( - "{}={}".format(prop.name, prop_value.value) - ) - else: - prop_strings.append( - "{}={}".format(prop.name, str(prop_value)) - ) # handle native python types - return "{} {}".format( - element_name, " ".join(prop_strings) - ) # libcamerasrc name=cam_name location=p.jpeg - - def get_devices() -> T.List[Gst.Device]: monitor = Gst.DeviceMonitor.new() # type: Gst.DeviceMonitor monitor.add_filter("Video/Source") @@ -105,47 +44,11 @@ def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: caps = caps.subtract(fixated_cap) # this is useful to get around missing types # in default gst python binding on ubuntu, like Gst.Fraction - fixated_caps.append(caps) # append tha final one + fixated_caps.append(caps) # append the final one return fixated_caps -def pipeline_add_many( - pipeline: Gst.Pipeline, elements: T.Iterable[Gst.Element] -): - for elem in elements: - pipeline.add(elem) - - -def element_link_many(elements: T.Iterable[Gst.Element]): - elem_list = list(elements) - for i in range(len(elem_list) - 1): - elem1, elem2 = elem_list[i], elem_list[i + 1] - is_linked = Gst.Element.link(elem1, elem2) - if not is_linked: - if elem1.get_factory().get_name() == "decodebin": - e2_copy = elem2 # force a reference copy - - def on_pad_added( - decodebin: Gst.Element, decodebin_src: Gst.Pad - ): - print("\n\ndecode bin pad added\n\n") - e2_name = e2_copy.get_factory().get_name() - e2_sink = e2_copy.get_static_pad("sink") - assert e2_sink, "Null sink" - assert decodebin.link(e2_copy), f"cannot link to {e2_name}" - print("Linked decodebin to {}".format(e2_name)) - - elem1.connect("pad-added", on_pad_added) - else: - raise RuntimeError( - "{} and {} could not be linked!".format( - ElementPrinter.print(elem1), - ElementPrinter.print(elem2), - ) - ) - - def parse_args(): p = ArgumentParser() p.add_argument( @@ -154,11 +57,44 @@ def parse_args(): type=int, help="Number of seconds to keep the pipeline running " "before taking the photo. 
Default = 1", - default=1, + default=2, + ) + p.add_argument( + "-p", + "--path", + type=str, + help="Where to save all the files", + default="/home/ubuntu", ) return p.parse_args() +def elem_to_str(element: Gst.Element) -> str: + properties = element.list_properties() # list[GObject.GParamSpec] + element_name = element.get_factory().get_name() + + exclude = ["parent", "client"] + prop_strings = [] # type: list[str] + for prop in properties: + if prop.name in exclude: + continue + prop_value = element.get_property(prop.name) + if hasattr(prop_value, "to_string"): + # sometimes we have a nice to_string method, prioritize this + prop_strings.append( + "{}={}".format(prop.name, prop_value.to_string()) + ) + elif type(prop_value) is Enum: + prop_strings.append("{}={}".format(prop.name, prop_value.value)) + else: + prop_strings.append( + "{}={}".format(prop.name, str(prop_value)) + ) # handle native python types + return "{} {}".format( + element_name, " ".join(prop_strings) + ) # libcamerasrc name=cam_name location=p.jpeg + + def take_photo( source: Gst.Element, *, @@ -176,115 +112,59 @@ def take_photo( before taking the photo """ # key is the name, value is the element. Ordered - elements = { - "source-capsfilter": Gst.ElementFactory.make( - "capsfilter", "source-capsfilter" - ), - "decoder": Gst.ElementFactory.make("decodebin"), - "videoconvert": Gst.ElementFactory.make("videoconvert"), - "videorate": Gst.ElementFactory.make("videorate"), - "video-rate-capsfilter": Gst.ElementFactory.make( - "capsfilter", "video-rate-capsfilter" - ), - "jpegenc": Gst.ElementFactory.make("jpegenc"), - "multifilesink": Gst.ElementFactory.make("multifilesink"), - } # type: dict[str, Gst.Element] - assert all(element is not None for element in elements.values()) - - # set properties - elements["multifilesink"].set_property( - "location", "{}.jpeg".format(filename) - ) + + str_elements = [ + "capsfilter name=source-caps caps={}", # 0 + "decodebin", # 1 + "videoconvert", # 2 + "videorate", # 3 + "capsfilter name=videorate-caps caps=video/x-raw,framerate=1/{}", # 4 + "jpegenc", # 5 + "multifilesink location={}".format(filename), # 6 + ] if caps: assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) - elements["source-capsfilter"].set_property("caps", caps) - # structure 0 is guaranteed to exist + str_elements[0] = str_elements[0].format(caps.to_string()) mime_type = caps.get_structure(0).get_name() # type: str if mime_type == "image/jpeg": # decodebin has funny clock problem with image/jpeg - elements["decoder"] = Gst.ElementFactory.make("jpegdec") - assert elements["decoder"] is not None - if mime_type == "video/x-raw": + str_elements[1] = "jpegdec" + elif mime_type == "video/x-raw": # don't need a decoder for raw - del elements["decoder"] + str_elements[1] = "" # else case is using decodebin as a fallback - else: - # remove the initial capsfilter if unused - del elements["source-capsfilter"] if delay_seconds > 0 and caps: - elements["video-rate-capsfilter"].set_property( - "caps", - Gst.Caps.from_string( - "video/x-raw,framerate=1/{}".format(delay_seconds) - ), - ) - # framerate=(fraction)30/1, we can assume this format - # because caps is fixated - framerate_match = re.search( - r"framerate=\(fraction\)(\d+)\/1", caps.to_string() - ) - if framerate_match is not None: - num_buffers = delay_seconds * int(framerate_match.group(1)) - source.set_property("num-buffers", num_buffers) - print( - "[ INFO ] Dynamically computed", - "num-buffers={} to delay {} seconds".format( - num_buffers, delay_seconds - 
), - ) - else: - source.set_property("num-buffers", 60) - print( - "[ ERR ] Non standard framerate object: {}".format( - caps.to_string() - ), - "Defaulting to 60 buffers", - file=sys.stderr, - ) - + str_elements[4] = str_elements[4].format(delay_seconds) else: - del elements["source-capsfilter"] - del elements["video-rate-capsfilter"] - - # link elements and create pipeline - final_elements = (source, *elements.values()) - pipeline = Gst.Pipeline() - # Gst.Pipeline.add_many and Gst.Element.link_many - # do not exist in default ubuntu gst python binding - pipeline_add_many(pipeline, final_elements) - element_link_many(final_elements) - - print("Created pipeline: ") - print(" ! ".join(ElementPrinter.print(e) for e in final_elements)) - - def eos_handler(_, message: Gst.Message): - nonlocal pipeline - if message.type == Gst.MessageType.EOS: - print("[ OK ] We reached EOS!") - # use closure here since this function must take 2 parameters - # none of which can be the pipeline - pipeline.set_state(Gst.State.NULL) - main_loop.quit() - - bus = pipeline.get_bus() - bus.add_signal_watch() - bus.connect("message", eos_handler) - - @timeout(delay_seconds + 5) - def run(): - print("Setting playing state") - pipeline.set_state(Gst.State.PLAYING) - if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: - raise RuntimeError("Failed to transition to playing state") - - print("Playing!") - main_loop.run() - while main_loop.is_running(): - pass - - run() + str_elements[3] = "" + str_elements[4] = "" + + partial = " ! ".join(elem for elem in str_elements if elem != "") + pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline + + assert pipeline.add(source) + head_elem = pipeline.get_by_name("source-caps") + assert head_elem + assert source.link(head_elem) + + print("Created pipeline: {} ! 
{}".format(elem_to_str(source), partial)) + + print("Setting playing state") + pipeline.set_state(Gst.State.PLAYING) + if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: + raise RuntimeError("Failed to transition to playing state") + + def quit(): + pipeline.set_state(Gst.State.NULL) + main_loop.quit() + + print("Playing!") + GLib.timeout_add_seconds(delay_seconds, quit) + main_loop.run() + while main_loop.is_running(): + pass def main(): From 81aec83133380c8e714fd379eb9655bb375d2847 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 21 Nov 2024 16:46:12 +0800 Subject: [PATCH 07/79] fix: add timeout to handle pipeline hangs --- .../base/bin/camera_test_auto_gst_source.py | 58 +++++++++++++------ 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 8439f63661..5dc2e22161 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -5,6 +5,7 @@ import gi from argparse import ArgumentParser import typing as T +from checkbox_support.helpers.timeout import timeout # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py @@ -73,12 +74,20 @@ def elem_to_str(element: Gst.Element) -> str: properties = element.list_properties() # list[GObject.GParamSpec] element_name = element.get_factory().get_name() - exclude = ["parent", "client"] + exclude = ["parent", "client-name"] prop_strings = [] # type: list[str] for prop in properties: if prop.name in exclude: continue - prop_value = element.get_property(prop.name) + try: + prop_value = element.get_property(prop.name) + except: + print( + "[ INFO ] Property {} is unreadable in {}".format( + prop.name, element_name + ) + ) + continue if hasattr(prop_value, "to_string"): # sometimes we have a nice to_string method, prioritize this prop_strings.append( @@ -99,7 +108,7 @@ def take_photo( source: Gst.Element, *, caps: T.Optional[Gst.Caps] = None, - filename="/home/fgfg/photo", + file_path: str, delay_seconds=0, ): """Take a photo using the source element @@ -111,16 +120,17 @@ def take_photo( :param delay_seconds: number of seconds to keep the pipeline running before taking the photo """ - # key is the name, value is the element. Ordered + # this may seem unorthodox + # but it's way less verbose than creating individual elements str_elements = [ - "capsfilter name=source-caps caps={}", # 0 + 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 "videoconvert", # 2 "videorate", # 3 - "capsfilter name=videorate-caps caps=video/x-raw,framerate=1/{}", # 4 + 'capsfilter name=videorate-caps caps="video/x-raw,framerate=1/{}"', # 4 "jpegenc", # 5 - "multifilesink location={}".format(filename), # 6 + "multifilesink location={}".format(file_path), # 6 ] if caps: @@ -135,7 +145,7 @@ def take_photo( str_elements[1] = "" # else case is using decodebin as a fallback - if delay_seconds > 0 and caps: + if delay_seconds > 0: str_elements[4] = str_elements[4].format(delay_seconds) else: str_elements[3] = "" @@ -149,23 +159,35 @@ def take_photo( assert head_elem assert source.link(head_elem) - print("Created pipeline: {} ! {}".format(elem_to_str(source), partial)) + print( + "[ OK ] Created pipeline: {} ! 
{}".format(elem_to_str(source), partial) + ) - print("Setting playing state") - pipeline.set_state(Gst.State.PLAYING) - if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: - raise RuntimeError("Failed to transition to playing state") + print("[ INFO ] Setting playing state") + + @timeout(5) # pipeline needs to start within 5 seconds + def start(): + pipeline.set_state(Gst.State.PLAYING) + # it's possible to hang here if the source is broken + # but the main thread will keep running, + # so we check both an explicit fail and a hang + if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: + raise RuntimeError("Failed to transition to playing state") def quit(): pipeline.set_state(Gst.State.NULL) main_loop.quit() - print("Playing!") + start() + print("[ OK ] Pipeline is playing!") + GLib.timeout_add_seconds(delay_seconds, quit) main_loop.run() while main_loop.is_running(): pass + print("[ OK ] Photo was saved to {}".format(file_path)) + def main(): args = parse_args() @@ -183,16 +205,16 @@ def main(): for cap_i, capability in enumerate(get_all_fixated_caps(caps)): print( "[ INFO ] Testing", - capability.to_string(), + '"{}"'.format(capability.to_string()), "for device", - device.get_display_name(), + '"{}"'.format(device.get_display_name()), ) take_photo( device.create_element(), delay_seconds=args.wait_seconds, caps=capability, - filename="/home/zhongning/fgfg/photo_dev{}_cap{}".format( - dev_i, cap_i + file_path="{}/photo_dev{}_cap{}.jpeg".format( + args.path, dev_i, cap_i ), ) From 20774de74852f9434050cc96853db1f0fd415a19 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 21 Nov 2024 17:35:12 +0800 Subject: [PATCH 08/79] feat: display preview --- .../base/bin/camera_test_auto_gst_source.py | 81 ++++++++++++++----- 1 file changed, 59 insertions(+), 22 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 5dc2e22161..f126f1dfc4 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -5,17 +5,25 @@ import gi from argparse import ArgumentParser import typing as T -from checkbox_support.helpers.timeout import timeout + +# from checkbox_support.helpers.timeout import timeout +timeout = lambda _: lambda f: f # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py + +gi.require_version("Gtk", "3.0") +from gi.repository import Gtk # type: ignore + gi.require_version("Gst", "1.0") from gi.repository import Gst # type: ignore gi.require_version("GLib", "2.0") from gi.repository import GLib # type: ignore + Gst.init(None) +Gtk.init([]) main_loop = GLib.MainLoop.new( # type: GLib.MainLoop None, False # type: ignore ) @@ -104,6 +112,50 @@ def elem_to_str(element: Gst.Element) -> str: ) # libcamerasrc name=cam_name location=p.jpeg +def run_pipeline(pipeline: Gst.Pipeline, run_n_seconds: int): + @timeout(5) # pipeline needs to start within 5 seconds + def start(): + pipeline.set_state(Gst.State.PLAYING) + # it's possible to hang here if the source is broken + # but the main thread will keep running, + # so we check both an explicit fail and a hang + if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: + raise RuntimeError("Failed to transition to playing state") + + def quit(): + pipeline.set_state(Gst.State.NULL) + main_loop.quit() + + start() + print("[ OK ] Pipeline is playing!") + + GLib.timeout_add_seconds(run_n_seconds, quit) + + 
main_loop.run() + + +def display_viewfinder( + source: Gst.Element, + *, + show_n_seconds=5, +): + partial_pipeline = "videoconvert name=head ! autovideosink name=sink" + pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline + head = pipeline.get_by_name("head") + print(pipeline.get_by_name("sink")) + + assert pipeline.add(source) + assert head + assert source.link(head) + + print( + "[ OK ] Created pipeline: {} ! {}".format( + elem_to_str(source), partial_pipeline + ) + ) + run_pipeline(pipeline, show_n_seconds) + + def take_photo( source: Gst.Element, *, @@ -116,7 +168,7 @@ def take_photo( :param source: The camera source element :param caps: Which capability to use for the source - If none, no caps filter will be inserted between source and decoder - :param filename: the path to the photo + :param file_path: the path to the photo :param delay_seconds: number of seconds to keep the pipeline running before taking the photo """ @@ -133,6 +185,8 @@ def take_photo( "multifilesink location={}".format(file_path), # 6 ] + # using empty string as null values here + # they are filtered out at parse_launch if caps: assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) str_elements[0] = str_elements[0].format(caps.to_string()) @@ -165,26 +219,7 @@ def take_photo( print("[ INFO ] Setting playing state") - @timeout(5) # pipeline needs to start within 5 seconds - def start(): - pipeline.set_state(Gst.State.PLAYING) - # it's possible to hang here if the source is broken - # but the main thread will keep running, - # so we check both an explicit fail and a hang - if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: - raise RuntimeError("Failed to transition to playing state") - - def quit(): - pipeline.set_state(Gst.State.NULL) - main_loop.quit() - - start() - print("[ OK ] Pipeline is playing!") - - GLib.timeout_add_seconds(delay_seconds, quit) - main_loop.run() - while main_loop.is_running(): - pass + run_pipeline(pipeline, delay_seconds) print("[ OK ] Photo was saved to {}".format(file_path)) @@ -217,6 +252,8 @@ def main(): args.path, dev_i, cap_i ), ) + break + display_viewfinder(device.create_element()) if __name__ == "__main__": From 6297a2bb820a9fe416e6998fcde704735ebfa541 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:40:04 +0800 Subject: [PATCH 09/79] style: err message --- providers/base/bin/camera_test_auto_gst_source.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index f126f1dfc4..2dfb6fed6f 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -47,7 +47,7 @@ def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: """ fixated_caps = [] while not caps.is_fixed(): - # keep fixiating it until it's fixed + # keep fixating it until it's fixed fixated_cap = caps.fixate() fixated_caps.append(fixated_cap) caps = caps.subtract(fixated_cap) @@ -230,6 +230,8 @@ def main(): if len(devices) == 0: print( "GStreamer cannot find any cameras on this device.", + "If you know a camera element exists, then it did not implement", + "Gst.DeviceProvider to make itself visible to GStreamer", file=sys.stderr, ) exit(1) From 204cfcf3625e23999de1de8c86c0821c259c7061 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Fri, 6 Dec 2024 15:37:40 +0800 Subject: [PATCH 
10/79] feat: video recording --- .../base/bin/camera_test_auto_gst_source.py | 303 +++++++++++++++--- 1 file changed, 257 insertions(+), 46 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 2dfb6fed6f..d212a7b5ca 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -1,16 +1,24 @@ #! /usr/bin/python3 from enum import Enum -import sys +import os +import time import gi from argparse import ArgumentParser import typing as T +import logging -# from checkbox_support.helpers.timeout import timeout -timeout = lambda _: lambda f: f +VoidFn = T.Callable[[], None] # takes nothing and returns nothing # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py +logging.basicConfig( + format="%(asctime)s %(levelname)s - %(message)s", + datefmt="%m/%d %H:%M:%S", + level=logging.DEBUG, +) +logger = logging.getLogger(__name__) + gi.require_version("Gtk", "3.0") from gi.repository import Gtk # type: ignore @@ -41,31 +49,31 @@ def get_devices() -> T.List[Gst.Device]: def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: - """Gets all the fixated(unique) caps from a Gst.Caps object + """Gets all the fixated(1 value per property) caps from a Gst.Caps object :param caps: A mixed Gst.Caps """ - fixated_caps = [] + fixed_caps = [] while not caps.is_fixed(): # keep fixating it until it's fixed - fixated_cap = caps.fixate() - fixated_caps.append(fixated_cap) - caps = caps.subtract(fixated_cap) + fixed_cap = caps.fixate() + fixed_caps.append(fixed_cap) + caps = caps.subtract(fixed_cap) # this is useful to get around missing types # in default gst python binding on ubuntu, like Gst.Fraction - fixated_caps.append(caps) # append the final one + fixed_caps.append(caps) # append the final one - return fixated_caps + return fixed_caps def parse_args(): p = ArgumentParser() p.add_argument( - "-s", + "-ws", "--wait-seconds", type=int, help="Number of seconds to keep the pipeline running " - "before taking the photo. Default = 1", + "before taking the photo. 
Default = 2", default=2, ) p.add_argument( @@ -79,24 +87,35 @@ def parse_args(): def elem_to_str(element: Gst.Element) -> str: + """Prints an element to string + - Excluding parent & client name + :param element: gstreamer element + :return: String representaion + """ properties = element.list_properties() # list[GObject.GParamSpec] element_name = element.get_factory().get_name() exclude = ["parent", "client-name"] prop_strings = [] # type: list[str] + for prop in properties: if prop.name in exclude: continue + try: prop_value = element.get_property(prop.name) except: - print( - "[ INFO ] Property {} is unreadable in {}".format( + logger.info( + "Property {} is unreadable in {}".format( prop.name, element_name - ) + ) # not every property is readable, ignore unreadable ones ) continue - if hasattr(prop_value, "to_string"): + + if ( + hasattr(prop_value, "to_string") + and type(prop_value.to_string).__name__ == "method" + ): # sometimes we have a nice to_string method, prioritize this prop_strings.append( "{}={}".format(prop.name, prop_value.to_string()) @@ -107,29 +126,53 @@ def elem_to_str(element: Gst.Element) -> str: prop_strings.append( "{}={}".format(prop.name, str(prop_value)) ) # handle native python types + return "{} {}".format( element_name, " ".join(prop_strings) ) # libcamerasrc name=cam_name location=p.jpeg -def run_pipeline(pipeline: Gst.Pipeline, run_n_seconds: int): - @timeout(5) # pipeline needs to start within 5 seconds +def run_pipeline( + pipeline: Gst.Pipeline, + run_n_seconds: int, + intermediate_calls: T.List[T.Tuple[int, VoidFn]] = [], +): + """Run the pipeline + + :param pipeline: Gst.Pipeline. All element creation/linking steps + should be done by this point + :param run_n_seconds: how long until we stop the main loop + :param intermedate_calls: a list of functions to call + while the pipeline is running. list[(() -> None, int)], where 2nd elem + is the number of seconds to wait RELATIVE to + when the pipeline started running + :raises RuntimeError: When set_state(PLAYING) fails + """ + + # pipeline needs to start within 5 seconds def start(): pipeline.set_state(Gst.State.PLAYING) # it's possible to hang here if the source is broken # but the main thread will keep running, # so we check both an explicit fail and a hang - if pipeline.get_state(0)[0] == Gst.StateChangeReturn.FAILURE: + if pipeline.get_state(5)[0] == Gst.StateChangeReturn.FAILURE: + pipeline.set_state(Gst.State.NULL) raise RuntimeError("Failed to transition to playing state") def quit(): + logger.debug("Setting null to stop the pipeline") pipeline.set_state(Gst.State.NULL) main_loop.quit() start() - print("[ OK ] Pipeline is playing!") + logger.info(f"[ OK ] Pipeline is playing! {run_n_seconds}") GLib.timeout_add_seconds(run_n_seconds, quit) + for delay, call in intermediate_calls: + assert ( + delay < run_n_seconds + ), "delay for each call must be smaller than total run seconds" + GLib.timeout_add_seconds(delay, call) main_loop.run() @@ -139,17 +182,24 @@ def display_viewfinder( *, show_n_seconds=5, ): - partial_pipeline = "videoconvert name=head ! autovideosink name=sink" + """Shows a viewfinder for the given camera source + + :param source: camera source element. + If there is any property that needs to be set, + do that before calling this function + :param show_n_seconds: number of seconds to keep the viewfinder on screen + """ + + partial_pipeline = " ! 
".join(["videoconvert name=head", "autovideosink"]) pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline head = pipeline.get_by_name("head") - print(pipeline.get_by_name("sink")) assert pipeline.add(source) assert head assert source.link(head) - print( - "[ OK ] Created pipeline: {} ! {}".format( + logging.info( + "[ OK ] Created pipeline for viewfinder: {} ! {}".format( elem_to_str(source), partial_pipeline ) ) @@ -167,9 +217,9 @@ def take_photo( :param source: The camera source element :param caps: Which capability to use for the source - - If none, no caps filter will be inserted between source and decoder + - If None, no caps filter will be inserted between source and decoder :param file_path: the path to the photo - :param delay_seconds: number of seconds to keep the pipeline running + :param delay_seconds: number of seconds to keep the source "open" before taking the photo """ @@ -178,66 +228,217 @@ def take_photo( str_elements = [ 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 - "videoconvert", # 2 - "videorate", # 3 + "videoconvert name=converter", # 2 + "videorate drop-only=True", # 3 'capsfilter name=videorate-caps caps="video/x-raw,framerate=1/{}"', # 4 "jpegenc", # 5 - "multifilesink location={}".format(file_path), # 6 + "valve name=photo-valve", # 6 + "filesink location={}".format(file_path), # 7 ] + head_elem_name = "source-caps" # using empty string as null values here # they are filtered out at parse_launch if caps: assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) + str_elements[0] = str_elements[0].format(caps.to_string()) mime_type = caps.get_structure(0).get_name() # type: str + if mime_type == "image/jpeg": - # decodebin has funny clock problem with image/jpeg + # decodebin has funny clock problem with live sources in image/jpeg str_elements[1] = "jpegdec" elif mime_type == "video/x-raw": # don't need a decoder for raw str_elements[1] = "" # else case is using decodebin as a fallback + else: + # decode bin doesn't work with video/x-raw + # videorate doesn't work if source-caps was not created + str_elements[0] = str_elements[1] = str_elements[3] = str_elements[ + 4 + ] = "" + head_elem_name = "converter" if delay_seconds > 0: + # if this caps filter was deleted earlier, this does nothing str_elements[4] = str_elements[4].format(delay_seconds) else: str_elements[3] = "" str_elements[4] = "" - partial = " ! ".join(elem for elem in str_elements if elem != "") + partial = " ! ".join(elem for elem in str_elements if elem) pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline + head_elem = pipeline.get_by_name(head_elem_name) + valve = pipeline.get_by_name("photo-valve") + + # parse the partial pipeline, then get head element by name + assert pipeline.add( + source + ), "Could not add source element {} to the pipeline".format( + elem_to_str(source) + ) + assert head_elem and valve + assert source.link( + head_elem + ), "Could not link source element to {}".format(head_elem) + + def open_valve(): + logging.debug("Opening valve!") + valve.set_property("drop", False) + + logging.info( + "[ OK ] Created photo pipeline with {} second delay".format( + delay_seconds + ) + ) + # this is just printing the pipeline and separate it from the rest + logging.info("{} ! 
{}".format(elem_to_str(source), partial)) - assert pipeline.add(source) - head_elem = pipeline.get_by_name("source-caps") + logging.debug("Setting playing state") + + run_pipeline( + pipeline, + delay_seconds + + 1, # run the pipeline for 1 more second to take tha actual photo + [(delay_seconds, open_valve)], + ) + + logging.info( + "[ OK ] Photo for this capability: " + + "{}".format(caps.to_string() if caps else "[device default]") + + "was saved to {}".format(file_path) + ) + + +def record_video( + source: Gst.Element, + *, + caps: T.Optional[Gst.Caps] = None, + file_path: str, + record_n_seconds=0, +): + assert file_path.endswith( + ".avi" + ), "This function uses avimux, so the filename must end in .avi" + + str_elements = [ + 'capsfilter name=source-caps caps="{}"', # 0 + "decodebin", # 1 + "videoconvert name=converter", # 2 + "avimux", # 3 + "filesink location={}".format(file_path), # 4 + ] + + head_elem_name = "source-caps" + + if caps: + assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) + + str_elements[0] = str_elements[0].format(caps.to_string()) + mime_type = caps.get_structure(0).get_name() # type: str + + if mime_type == "image/jpeg": + str_elements[1] = "jpegdec" + elif mime_type == "video/x-raw": + str_elements[1] = "" + else: + # decode bin doesn't work with video/x-raw + # videorate doesn't work if source-caps was not created + str_elements[0] = str_elements[1] = "" + head_elem_name = "converter" + + partial = " ! ".join(elem for elem in str_elements if elem) + pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline + head_elem = pipeline.get_by_name(head_elem_name) + + # parse the partial pipeline, then get head element by name + assert pipeline.add( + source + ), "Could not add source element {} to the pipeline".format( + elem_to_str(source) + ) assert head_elem - assert source.link(head_elem) + assert source.link( + head_elem + ), "Could not link source element to {}".format(head_elem) - print( - "[ OK ] Created pipeline: {} ! {}".format(elem_to_str(source), partial) + logging.info( + "[ OK ] Created video pipeline to record {} seconds".format( + record_n_seconds + ) ) + logging.info("{} ! {}".format(elem_to_str(source), partial)) + logging.debug("Setting playing state") - print("[ INFO ] Setting playing state") + run_pipeline(pipeline, record_n_seconds) - run_pipeline(pipeline, delay_seconds) + logging.info( + "[ OK ] Video for this capability: " + + "{}".format(caps.to_string() if caps else "[device default]") + + "was saved to {}".format(file_path) + ) - print("[ OK ] Photo was saved to {}".format(file_path)) + """record + videotestsrc num-buffers=120 ! + queue ! + encodebin profile="video/quicktime,variant=iso:video/x-h264" ! + filesink location=video.mp4 + """ + """decode + filesrc location=video.mp4 ! decodebin ! autovideosink + """ def main(): args = parse_args() + if not os.path.isdir(args.path): + # must validate early, filesink does not check if the path exists + raise FileNotFoundError('Path "{}" does not exist'.format(args.path)) + + if os.getuid() == 0: + logging.warning( + "Running this script as root. " + "This may lead to different results than running as regular user." + ) + devices = get_devices() if len(devices) == 0: - print( - "GStreamer cannot find any cameras on this device.", - "If you know a camera element exists, then it did not implement", - "Gst.DeviceProvider to make itself visible to GStreamer", - file=sys.stderr, + logging.error( + "GStreamer cannot find any cameras on this device. 
" + "If you know a camera element exists, then it did not implement " + "Gst.DeviceProvider to make itself visible to GStreamer " + "or it is inaccessible without sudo." ) exit(1) print("Found {} cameras!".format(len(devices))) + print( + '[ HINT ] For debugging, remove the "valve" element to get a pipeline', + "that can be run with gst-launch-1.0", + "Also keep the pipeline running for {} seconds".format( + args.wait_seconds + ), + ) + for dev_i, device in enumerate(devices): + dev_element = device.create_element() + record_video( + dev_element, + file_path="{}/video_dev_{}_cap_{}.avi".format( + args.path, dev_i, "default" + ), + record_n_seconds=args.wait_seconds + ) + break + take_photo( + dev_element, + delay_seconds=args.wait_seconds, + caps=None, + file_path="{}/photo_dev_{}_cap_{}.jpeg".format( + args.path, dev_i, "default" + ), + ) caps = device.get_caps() for cap_i, capability in enumerate(get_all_fixated_caps(caps)): print( @@ -247,7 +448,7 @@ def main(): '"{}"'.format(device.get_display_name()), ) take_photo( - device.create_element(), + dev_element, delay_seconds=args.wait_seconds, caps=capability, file_path="{}/photo_dev{}_cap{}.jpeg".format( @@ -255,7 +456,17 @@ def main(): ), ) break - display_viewfinder(device.create_element()) + + display_viewfinder(dev_element) + + sleep_sec = 3 + logging.info( + "Sleep {} seconds to release current device pipeline".format( + sleep_sec + ) + + '"{}"'.format(elem_to_str(dev_element)), + ) + time.sleep(3) if __name__ == "__main__": From 0ef5e5ec11e94e3752e023d3c84e6853e4d84ce6 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Fri, 6 Dec 2024 16:40:57 +0800 Subject: [PATCH 11/79] feat: video playback --- .../base/bin/camera_test_auto_gst_source.py | 47 ++++++++++++++++--- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index d212a7b5ca..bb39b2f20f 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -134,14 +134,15 @@ def elem_to_str(element: Gst.Element) -> str: def run_pipeline( pipeline: Gst.Pipeline, - run_n_seconds: int, + run_n_seconds: int = -1, intermediate_calls: T.List[T.Tuple[int, VoidFn]] = [], ): """Run the pipeline :param pipeline: Gst.Pipeline. All element creation/linking steps should be done by this point - :param run_n_seconds: how long until we stop the main loop + :param run_n_seconds: how long until we stop the main loop. + - If -1, only wait for EOS :param intermedate_calls: a list of functions to call while the pipeline is running. list[(() -> None, int)], where 2nd elem is the number of seconds to wait RELATIVE to @@ -167,16 +168,42 @@ def quit(): start() logger.info(f"[ OK ] Pipeline is playing! {run_n_seconds}") - GLib.timeout_add_seconds(run_n_seconds, quit) for delay, call in intermediate_calls: assert ( delay < run_n_seconds ), "delay for each call must be smaller than total run seconds" GLib.timeout_add_seconds(delay, call) + if run_n_seconds == -1: + bus = pipeline.get_bus() + assert bus + bus.add_signal_watch() + bus.connect( + "message", + lambda _, msg: msg.type + in (Gst.MessageType.EOS, Gst.MessageType.ERROR) + and quit(), + ) + else: + GLib.timeout_add_seconds(run_n_seconds, quit) + main_loop.run() +def play_video(filepath: str): + pipeline = Gst.parse_launch( + " ! 
".join( + [ + "filesrc location={}".format(filepath), + "decodebin", + "videoconvert", + "autovideosink", + ] + ) + ) + run_pipeline(pipeline) + + def display_viewfinder( source: Gst.Element, *, @@ -307,7 +334,7 @@ def open_valve(): logging.info( "[ OK ] Photo for this capability: " + "{}".format(caps.to_string() if caps else "[device default]") - + "was saved to {}".format(file_path) + + " was saved to {}".format(file_path) ) @@ -343,7 +370,7 @@ def record_video( elif mime_type == "video/x-raw": str_elements[1] = "" else: - # decode bin doesn't work with video/x-raw + # decodebin doesn't work with video/x-raw # videorate doesn't work if source-caps was not created str_elements[0] = str_elements[1] = "" head_elem_name = "converter" @@ -402,6 +429,8 @@ def main(): "This may lead to different results than running as regular user." ) + play_video("/home/zhongning/fgfg/video_dev_0_cap_default.avi") + return devices = get_devices() if len(devices) == 0: logging.error( @@ -428,7 +457,7 @@ def main(): file_path="{}/video_dev_{}_cap_{}.avi".format( args.path, dev_i, "default" ), - record_n_seconds=args.wait_seconds + record_n_seconds=args.wait_seconds, ) break take_photo( @@ -470,4 +499,10 @@ def main(): if __name__ == "__main__": + old_env = os.environ.get("GST_DEBUG", None) + os.environ["GST_DEBUG"] = "2" # error and warnings + main() + + if old_env: + os.environ["GST_DEBUG"] = old_env From eb3b4d30ec9ee218b92a9196889fda5ac2b6629b Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:35:08 +0800 Subject: [PATCH 12/79] fix: gracefull shutdown, implement parser --- .../base/bin/camera_test_auto_gst_source.py | 172 +++++++++++------- 1 file changed, 110 insertions(+), 62 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index bb39b2f20f..e22a11eef5 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -67,28 +67,65 @@ def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: def parse_args(): - p = ArgumentParser() - p.add_argument( + parser = ArgumentParser() + + subparser = parser.add_subparsers(dest="subcommand", required=True) + photo_subparser = subparser.add_parser("take-photo") + photo_subparser.add_argument( "-ws", "--wait-seconds", type=int, help="Number of seconds to keep the pipeline running " - "before taking the photo. Default = 2", + "before taking the photo. Default = 2.", default=2, ) - p.add_argument( + # photo_subparser.add_argument( + # "-ac", + # "--all-caps", + # action="store_true", + # help="Take a photo for each camera capability. " + # "The --wait-seconds is in effect for all of them. ", + # ) + photo_subparser.add_argument( + "-p", + "--path", + type=str, + help="Where to save the file. This should be a directory.", + required=True, + ) + + video_subparser = subparser.add_parser("record-video") + video_subparser.add_argument( + "-s", + "--seconds", + type=int, + help="Number of seconds to record. Default = 5.", + default=5, + ) + video_subparser.add_argument( "-p", "--path", type=str, - help="Where to save all the files", - default="/home/ubuntu", + help="Where to save the file. 
This should be a directory.", + required=True, ) - return p.parse_args() + + player_subparser = subparser.add_parser("play-video") + player_subparser.add_argument( + "-p", + "--path", + type=str, + help="Path to the video file", + required=True, + ) + + return parser.parse_args() def elem_to_str(element: Gst.Element) -> str: """Prints an element to string - Excluding parent & client name + :param element: gstreamer element :return: String representaion """ @@ -161,12 +198,22 @@ def start(): raise RuntimeError("Failed to transition to playing state") def quit(): - logger.debug("Setting null to stop the pipeline") - pipeline.set_state(Gst.State.NULL) + logger.debug("Sending EOS.") + # Terminate gracefully with EOS. + # Directly setting it to null can cause videos to have timestamp issues + eos_handled = pipeline.send_event(Gst.Event.new_eos()) + + if not eos_handled: + logging.error( + "EOS was not handled by the pipeline. " + "Forcefully setting the state to NULL." + ) + pipeline.set_state(Gst.State.NULL) + main_loop.quit() start() - logger.info(f"[ OK ] Pipeline is playing! {run_n_seconds}") + logger.info(f"[ OK ] Pipeline is playing!") for delay, call in intermediate_calls: assert ( @@ -319,9 +366,7 @@ def open_valve(): delay_seconds ) ) - # this is just printing the pipeline and separate it from the rest logging.info("{} ! {}".format(elem_to_str(source), partial)) - logging.debug("Setting playing state") run_pipeline( @@ -346,15 +391,16 @@ def record_video( record_n_seconds=0, ): assert file_path.endswith( - ".avi" - ), "This function uses avimux, so the filename must end in .avi" + ".mkv" + ), "This function uses matroshkamux, so the filename must end in .mkv" str_elements = [ 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 "videoconvert name=converter", # 2 - "avimux", # 3 - "filesink location={}".format(file_path), # 4 + "jpegenc", # 3, avoid massiave uncompressed videos + "matroskamux", # 4 + "filesink location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -403,7 +449,7 @@ def record_video( logging.info( "[ OK ] Video for this capability: " + "{}".format(caps.to_string() if caps else "[device default]") - + "was saved to {}".format(file_path) + + " was saved to {}".format(file_path) ) """record @@ -419,6 +465,8 @@ def record_video( def main(): args = parse_args() + print(args) + if not os.path.isdir(args.path): # must validate early, filesink does not check if the path exists raise FileNotFoundError('Path "{}" does not exist'.format(args.path)) @@ -429,9 +477,8 @@ def main(): "This may lead to different results than running as regular user." ) - play_video("/home/zhongning/fgfg/video_dev_0_cap_default.avi") - return devices = get_devices() + if len(devices) == 0: logging.error( "GStreamer cannot find any cameras on this device. 
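
The subcommand wiring above boils down to argparse sub-parsers sharing a required dest. A trimmed sketch with illustrative argument names:

    from argparse import ArgumentParser

    parser = ArgumentParser()
    sub = parser.add_subparsers(dest="subcommand", required=True)
    photo = sub.add_parser("take-photo")
    photo.add_argument("-p", "--path", required=True)
    video = sub.add_parser("record-video")
    video.add_argument("-s", "--seconds", type=int, default=5)

    args = parser.parse_args(["record-video", "-s", "3"])
    assert args.subcommand == "record-video" and args.seconds == 3
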
" @@ -441,61 +488,62 @@ def main(): ) exit(1) - print("Found {} cameras!".format(len(devices))) + seconds_per_pipeline = ( + args.wait_seconds if args.subcommand == "take-photo" else args.seconds + ) + sleep_sec = 3 + logging.info("Found {} cameras!".format(len(devices))) print( '[ HINT ] For debugging, remove the "valve" element to get a pipeline', "that can be run with gst-launch-1.0", "Also keep the pipeline running for {} seconds".format( - args.wait_seconds + seconds_per_pipeline ), ) for dev_i, device in enumerate(devices): dev_element = device.create_element() - record_video( - dev_element, - file_path="{}/video_dev_{}_cap_{}.avi".format( - args.path, dev_i, "default" - ), - record_n_seconds=args.wait_seconds, - ) - break - take_photo( - dev_element, - delay_seconds=args.wait_seconds, - caps=None, - file_path="{}/photo_dev_{}_cap_{}.jpeg".format( - args.path, dev_i, "default" - ), - ) - caps = device.get_caps() - for cap_i, capability in enumerate(get_all_fixated_caps(caps)): - print( - "[ INFO ] Testing", - '"{}"'.format(capability.to_string()), - "for device", - '"{}"'.format(device.get_display_name()), - ) - take_photo( - dev_element, - delay_seconds=args.wait_seconds, - caps=capability, - file_path="{}/photo_dev{}_cap{}.jpeg".format( - args.path, dev_i, cap_i - ), - ) - break - - display_viewfinder(dev_element) - - sleep_sec = 3 + all_fixed_caps = get_all_fixated_caps(device.get_caps()) logging.info( - "Sleep {} seconds to release current device pipeline".format( - sleep_sec + "Test for this device may take {} seconds.".format( + len(all_fixed_caps) * (seconds_per_pipeline + sleep_sec) ) - + '"{}"'.format(elem_to_str(dev_element)), ) - time.sleep(3) + for cap_i, capability in enumerate(all_fixed_caps): + if args.subcommand == "take-photo": + logging.info( + "[ INFO ] Taking a photo with capability: " + + '"{}"'.format(capability.to_string()) + + "for device: " + + '"{}"'.format(device.get_display_name()), + ) + take_photo( + dev_element, + delay_seconds=args.wait_seconds, + caps=capability, + file_path="{}/photo_dev_{}_cap_{}.jpeg".format( + args.path, dev_i, cap_i + ), + ) + elif args.subcommand == "record-video": + record_video( + dev_element, + file_path="{}/video_dev_{}_cap_{}.mkv".format( + args.path, dev_i, cap_i + ), + caps=capability, + record_n_seconds=args.seconds, + ) + + logging.info( + "Sleep {} seconds to release current device pipeline ".format( + sleep_sec + ) + + '"{}"'.format(elem_to_str(dev_element)), + ) + time.sleep(sleep_sec) + + logging.info("[ OK ] All done!") if __name__ == "__main__": From a5bdd0546ead1a1833d7fef3ff247a11e87755f9 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Tue, 10 Dec 2024 21:35:05 +0800 Subject: [PATCH 13/79] fix: graceful shotdown pipelines --- .../base/bin/camera_test_auto_gst_source.py | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index e22a11eef5..c301ca7980 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -204,12 +204,15 @@ def quit(): eos_handled = pipeline.send_event(Gst.Event.new_eos()) if not eos_handled: - logging.error( - "EOS was not handled by the pipeline. " - "Forcefully setting the state to NULL." - ) - pipeline.set_state(Gst.State.NULL) + logging.error("EOS was not handled by the pipeline. 
") + bus = pipeline.get_bus() + assert bus + # at this point the previous signal_watch can be overriden + # (we are in the handler) + bus.add_signal_watch() + bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS) + pipeline.set_state(Gst.State.NULL) main_loop.quit() start() @@ -491,7 +494,6 @@ def main(): seconds_per_pipeline = ( args.wait_seconds if args.subcommand == "take-photo" else args.seconds ) - sleep_sec = 3 logging.info("Found {} cameras!".format(len(devices))) print( '[ HINT ] For debugging, remove the "valve" element to get a pipeline', @@ -506,7 +508,7 @@ def main(): all_fixed_caps = get_all_fixated_caps(device.get_caps()) logging.info( "Test for this device may take {} seconds.".format( - len(all_fixed_caps) * (seconds_per_pipeline + sleep_sec) + len(all_fixed_caps) * seconds_per_pipeline ) ) for cap_i, capability in enumerate(all_fixed_caps): @@ -535,14 +537,6 @@ def main(): record_n_seconds=args.seconds, ) - logging.info( - "Sleep {} seconds to release current device pipeline ".format( - sleep_sec - ) - + '"{}"'.format(elem_to_str(dev_element)), - ) - time.sleep(sleep_sec) - logging.info("[ OK ] All done!") From 27101b6fb933334cad737b67da13b1236393bd98 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Tue, 10 Dec 2024 23:10:47 +0800 Subject: [PATCH 14/79] fix: remove videorate and add unref --- .../base/bin/camera_test_auto_gst_source.py | 75 ++++++++----------- 1 file changed, 30 insertions(+), 45 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index c301ca7980..66ebe748b1 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -79,13 +79,6 @@ def parse_args(): "before taking the photo. Default = 2.", default=2, ) - # photo_subparser.add_argument( - # "-ac", - # "--all-caps", - # action="store_true", - # help="Take a photo for each camera capability. " - # "The --wait-seconds is in effect for all of them. ", - # ) photo_subparser.add_argument( "-p", "--path", @@ -186,6 +179,8 @@ def run_pipeline( when the pipeline started running :raises RuntimeError: When set_state(PLAYING) fails """ + bus = pipeline.get_bus() + assert bus # pipeline needs to start within 5 seconds def start(): @@ -197,7 +192,7 @@ def start(): pipeline.set_state(Gst.State.NULL) raise RuntimeError("Failed to transition to playing state") - def quit(): + def graceful_quit(): logger.debug("Sending EOS.") # Terminate gracefully with EOS. # Directly setting it to null can cause videos to have timestamp issues @@ -206,8 +201,6 @@ def quit(): if not eos_handled: logging.error("EOS was not handled by the pipeline. 
") - bus = pipeline.get_bus() - assert bus # at this point the previous signal_watch can be overriden # (we are in the handler) bus.add_signal_watch() @@ -215,18 +208,27 @@ def quit(): pipeline.set_state(Gst.State.NULL) main_loop.quit() + def quit(): + logger.debug("Setting state to NULL.") + pipeline.set_state(Gst.State.NULL) + main_loop.quit() + # Must explicitly unref, otherwise source is never released + # not sure why graceful_quite doesn't need this + pipeline.unref() + start() logger.info(f"[ OK ] Pipeline is playing!") for delay, call in intermediate_calls: - assert ( - delay < run_n_seconds - ), "delay for each call must be smaller than total run seconds" + assert run_n_seconds == -1 or delay < run_n_seconds, ( + "Delay for each call must be smaller than total run seconds, " + " (Got delay = {}, run_n_seconds = {})".format( + delay, run_n_seconds + ) + ) GLib.timeout_add_seconds(delay, call) if run_n_seconds == -1: - bus = pipeline.get_bus() - assert bus bus.add_signal_watch() bus.connect( "message", @@ -235,7 +237,7 @@ def quit(): and quit(), ) else: - GLib.timeout_add_seconds(run_n_seconds, quit) + GLib.timeout_add_seconds(run_n_seconds, graceful_quit) main_loop.run() @@ -306,11 +308,9 @@ def take_photo( 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 "videoconvert name=converter", # 2 - "videorate drop-only=True", # 3 - 'capsfilter name=videorate-caps caps="video/x-raw,framerate=1/{}"', # 4 - "jpegenc", # 5 - "valve name=photo-valve", # 6 - "filesink location={}".format(file_path), # 7 + "valve name=photo-valve drop=True", # 4 + "jpegenc snapshot=True", # 3 + "filesink location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -331,19 +331,9 @@ def take_photo( # else case is using decodebin as a fallback else: # decode bin doesn't work with video/x-raw - # videorate doesn't work if source-caps was not created - str_elements[0] = str_elements[1] = str_elements[3] = str_elements[ - 4 - ] = "" + str_elements[0] = str_elements[1] = str_elements[3] = "" head_elem_name = "converter" - if delay_seconds > 0: - # if this caps filter was deleted earlier, this does nothing - str_elements[4] = str_elements[4].format(delay_seconds) - else: - str_elements[3] = "" - str_elements[4] = "" - partial = " ! 
".join(elem for elem in str_elements if elem) pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline head_elem = pipeline.get_by_name(head_elem_name) @@ -365,7 +355,7 @@ def open_valve(): valve.set_property("drop", False) logging.info( - "[ OK ] Created photo pipeline with {} second delay".format( + "[ OK ] Created photo pipeline with {} second delay.".format( delay_seconds ) ) @@ -374,16 +364,11 @@ def open_valve(): run_pipeline( pipeline, - delay_seconds - + 1, # run the pipeline for 1 more second to take tha actual photo + -1, [(delay_seconds, open_valve)], ) - logging.info( - "[ OK ] Photo for this capability: " - + "{}".format(caps.to_string() if caps else "[device default]") - + " was saved to {}".format(file_path) - ) + logging.info("[ OK ] Photo was saved to {}".format(file_path)) def record_video( @@ -395,7 +380,7 @@ def record_video( ): assert file_path.endswith( ".mkv" - ), "This function uses matroshkamux, so the filename must end in .mkv" + ), "This function uses matroskamux, so the filename must end in .mkv" str_elements = [ 'capsfilter name=source-caps caps="{}"', # 0 @@ -507,14 +492,14 @@ def main(): dev_element = device.create_element() all_fixed_caps = get_all_fixated_caps(device.get_caps()) logging.info( - "Test for this device may take {} seconds.".format( - len(all_fixed_caps) * seconds_per_pipeline + "---- Test for this device may take {} seconds for {} caps. ----".format( + len(all_fixed_caps) * seconds_per_pipeline, len(all_fixed_caps) ) ) for cap_i, capability in enumerate(all_fixed_caps): if args.subcommand == "take-photo": logging.info( - "[ INFO ] Taking a photo with capability: " + "Taking a photo with capability: " + '"{}"'.format(capability.to_string()) + "for device: " + '"{}"'.format(device.get_display_name()), @@ -542,7 +527,7 @@ def main(): if __name__ == "__main__": old_env = os.environ.get("GST_DEBUG", None) - os.environ["GST_DEBUG"] = "2" # error and warnings + os.environ["GST_DEBUG"] = "3" # error and warnings main() From a07897d09d4320400eff2b35fe77fbe44c0d8e30 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 10:24:39 +0800 Subject: [PATCH 15/79] fix: temp workaround for weird err message after program is done --- providers/base/bin/camera_test_auto_gst_source.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 66ebe748b1..6ba13f7901 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -200,9 +200,12 @@ def graceful_quit(): if not eos_handled: logging.error("EOS was not handled by the pipeline. 
") + pipeline.set_state(Gst.State.NULL) # force stop + main_loop.quit() + return - # at this point the previous signal_watch can be overriden - # (we are in the handler) + # at this point any previous signal_watch can be overriden + # pipeline is already finished bus.add_signal_watch() bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS) pipeline.set_state(Gst.State.NULL) @@ -213,8 +216,9 @@ def quit(): pipeline.set_state(Gst.State.NULL) main_loop.quit() # Must explicitly unref, otherwise source is never released - # not sure why graceful_quite doesn't need this - pipeline.unref() + # not sure why graceful_quit doesn't need this + if pipeline.ref_count > 1: + pipeline.unref() start() logger.info(f"[ OK ] Pipeline is playing!") @@ -364,7 +368,7 @@ def open_valve(): run_pipeline( pipeline, - -1, + delay_seconds + 1, [(delay_seconds, open_valve)], ) From 28390fbd61c0a9d41e41388a13491659d82c4d03 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:16:50 +0800 Subject: [PATCH 16/79] feat: play video handler --- providers/base/bin/camera_test_auto_gst_source.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 6ba13f7901..f4e621dca0 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -459,16 +459,22 @@ def main(): args = parse_args() print(args) - if not os.path.isdir(args.path): - # must validate early, filesink does not check if the path exists - raise FileNotFoundError('Path "{}" does not exist'.format(args.path)) - if os.getuid() == 0: logging.warning( "Running this script as root. " "This may lead to different results than running as regular user." ) + + if args.subcommand == 'play-video': + play_video(args.path) + return + + if not os.path.isdir(args.path): + # must validate early, filesink does not check if the path exists + raise FileNotFoundError('Path "{}" does not exist'.format(args.path)) + + devices = get_devices() if len(devices) == 0: From 75694b61d2198abc20d8b2b4dadf9c2467f14703 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:47:42 +0800 Subject: [PATCH 17/79] fix: remove unnecessary bus.add_signal_watch call --- providers/base/bin/camera_test_auto_gst_source.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index f4e621dca0..fc1ef1d651 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -204,9 +204,6 @@ def graceful_quit(): main_loop.quit() return - # at this point any previous signal_watch can be overriden - # pipeline is already finished - bus.add_signal_watch() bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS) pipeline.set_state(Gst.State.NULL) main_loop.quit() @@ -368,7 +365,7 @@ def open_valve(): run_pipeline( pipeline, - delay_seconds + 1, + delay_seconds + 1, # workaround for now, weird problem with ref count [(delay_seconds, open_valve)], ) @@ -465,8 +462,7 @@ def main(): "This may lead to different results than running as regular user." 
) - - if args.subcommand == 'play-video': + if args.subcommand == "play-video": play_video(args.path) return @@ -474,7 +470,6 @@ def main(): # must validate early, filesink does not check if the path exists raise FileNotFoundError('Path "{}" does not exist'.format(args.path)) - devices = get_devices() if len(devices) == 0: From ce2b4497e9bc24e639797b8855f7d20f312c100c Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:51:13 +0800 Subject: [PATCH 18/79] fix: add all timeouts before running --- providers/base/bin/camera_test_auto_gst_source.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index fc1ef1d651..4911ad77ae 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -217,9 +217,6 @@ def quit(): if pipeline.ref_count > 1: pipeline.unref() - start() - logger.info(f"[ OK ] Pipeline is playing!") - for delay, call in intermediate_calls: assert run_n_seconds == -1 or delay < run_n_seconds, ( "Delay for each call must be smaller than total run seconds, " @@ -240,6 +237,8 @@ def quit(): else: GLib.timeout_add_seconds(run_n_seconds, graceful_quit) + start() + logger.info(f"[ OK ] Pipeline is playing!") main_loop.run() From 206ca646fa98b77f3deab352c4e3827b2510794f Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 14:35:06 +0800 Subject: [PATCH 19/79] fix: snapshot=True doesn't exist on 16 --- .../base/bin/camera_test_auto_gst_source.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 4911ad77ae..71642ce3e0 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -30,13 +30,6 @@ from gi.repository import GLib # type: ignore -Gst.init(None) -Gtk.init([]) -main_loop = GLib.MainLoop.new( # type: GLib.MainLoop - None, False # type: ignore -) - - def get_devices() -> T.List[Gst.Device]: monitor = Gst.DeviceMonitor.new() # type: Gst.DeviceMonitor monitor.add_filter("Video/Source") @@ -181,6 +174,9 @@ def run_pipeline( """ bus = pipeline.get_bus() assert bus + main_loop = GLib.MainLoop.new( # type: GLib.MainLoop + None, False # type: ignore + ) # pipeline needs to start within 5 seconds def start(): @@ -212,7 +208,8 @@ def quit(): logger.debug("Setting state to NULL.") pipeline.set_state(Gst.State.NULL) main_loop.quit() - # Must explicitly unref, otherwise source is never released + # Must explicitly unref if ref_count is somehow not 1, + # otherwise source is never released # not sure why graceful_quit doesn't need this if pipeline.ref_count > 1: pipeline.unref() @@ -309,7 +306,7 @@ def take_photo( "decodebin", # 1 "videoconvert name=converter", # 2 "valve name=photo-valve drop=True", # 4 - "jpegenc snapshot=True", # 3 + "jpegenc", # 3 "filesink location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -533,7 +530,11 @@ def main(): old_env = os.environ.get("GST_DEBUG", None) os.environ["GST_DEBUG"] = "3" # error and warnings + Gst.init(None) + Gtk.init([]) main() if old_env: os.environ["GST_DEBUG"] = old_env + else: + del os.environ["GST_DEBUG"] From 7ca20dc3e972419df13ee39ea7c2c30c8eb0cc84 Mon Sep 17 00:00:00 2001 From: Zhongning Li 
<60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 14:54:26 +0800 Subject: [PATCH 20/79] feat: dimension validation for photos --- .../base/bin/camera_test_auto_gst_source.py | 43 +++++++++++++++---- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 71642ce3e0..dfd37e4a41 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -3,10 +3,12 @@ from enum import Enum import os import time +import PIL.Image import gi from argparse import ArgumentParser import typing as T import logging +import PIL VoidFn = T.Callable[[], None] # takes nothing and returns nothing @@ -59,6 +61,26 @@ def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: return fixed_caps +def validate_image_dimensions( + image_file_path: str, expected_width: int, expected_height: int +) -> bool: + image = PIL.Image.open(image_file_path) + if image.width != expected_width: + logger.error( + "Image width mismatch. Expected = {}, actual = {}".format( + expected_width, image.width + ) + ) + if image.height != expected_height: + logger.error( + "Image height mismatch. Expected = {}, actual = {}".format( + expected_height, image.height + ) + ) + + return image.width != expected_width and image.height == expected_height + + def parse_args(): parser = ArgumentParser() @@ -352,11 +374,9 @@ def open_valve(): valve.set_property("drop", False) logging.info( - "[ OK ] Created photo pipeline with {} second delay.".format( - delay_seconds - ) + "Created photo pipeline with {} second delay.".format(delay_seconds) + + '"{} ! {}"'.format(elem_to_str(source), partial) ) - logging.info("{} ! {}".format(elem_to_str(source), partial)) logging.debug("Setting playing state") run_pipeline( @@ -498,6 +518,7 @@ def main(): ) ) for cap_i, capability in enumerate(all_fixed_caps): + cap_struct = capability.get_structure(0) if args.subcommand == "take-photo": logging.info( "Taking a photo with capability: " @@ -505,13 +526,19 @@ def main(): + "for device: " + '"{}"'.format(device.get_display_name()), ) + file_path = "{}/photo_dev_{}_cap_{}.jpeg".format( + args.path, dev_i, cap_i + ) take_photo( dev_element, delay_seconds=args.wait_seconds, caps=capability, - file_path="{}/photo_dev_{}_cap_{}.jpeg".format( - args.path, dev_i, cap_i - ), + file_path=file_path, + ) + validate_image_dimensions( + file_path, + cap_struct.get_int("width").value, + cap_struct.get_int("height").value, ) elif args.subcommand == "record-video": record_video( @@ -528,7 +555,7 @@ def main(): if __name__ == "__main__": old_env = os.environ.get("GST_DEBUG", None) - os.environ["GST_DEBUG"] = "3" # error and warnings + os.environ["GST_DEBUG"] = "2" # error and warnings Gst.init(None) Gtk.init([]) From 798b97a3ff6178e8eb5a6749a1bac8bd19015647 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:18:06 +0800 Subject: [PATCH 21/79] feat: validate video info --- .../base/bin/camera_test_auto_gst_source.py | 71 +++++++++++++++++-- 1 file changed, 67 insertions(+), 4 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index dfd37e4a41..f810fdbca5 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -10,6 +10,8 @@ import logging import PIL +# from checkbox_support.helpers.timeout 
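
The return expression in validate_image_dimensions above compares width with != but height with ==, so it cannot report a matching image as valid. A corrected sketch, still assuming Pillow:

    import PIL.Image

    def validate_image_dimensions(path, expected_width, expected_height):
        # both dimensions must match what the negotiated caps promised
        image = PIL.Image.open(path)
        return (
            image.width == expected_width
            and image.height == expected_height
        )
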
import run_with_timeout + VoidFn = T.Callable[[], None] # takes nothing and returns nothing # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py @@ -26,7 +28,7 @@ from gi.repository import Gtk # type: ignore gi.require_version("Gst", "1.0") -from gi.repository import Gst # type: ignore +from gi.repository import Gst, GstPbutils # type: ignore gi.require_version("GLib", "2.0") from gi.repository import GLib # type: ignore @@ -43,6 +45,58 @@ def get_devices() -> T.List[Gst.Device]: return devices +def validate_video_info( + video_file_path: str, + *, + expected_width: int, + expected_height: int, + expected_duration_seconds: int, + duration_tolerance_seconds=0.1, +) -> bool: + discoverer = GstPbutils.Discoverer() + + video_file_path.removeprefix("/") + info = discoverer.discover_uri("file://" + video_file_path) + duration = info.get_duration() # type: int # This is in nanoseconds + video_track = info.get_stream_info().get_streams()[0] + width = video_track.get_width() + height = video_track.get_height() + + all_passed = True + + print( + duration, + expected_duration_seconds * 10**9, + duration - expected_duration_seconds * 10**9, + ) + if ( + abs(duration - expected_duration_seconds * 10**9) + > duration_tolerance_seconds * 10**9 + ): + logging.error( + "Duration not within tolerance. Got {}ns, but expected {} +- {}s".format( + duration, expected_duration_seconds, duration_tolerance_seconds + ) + ) + all_passed = False + if width != expected_width: + logger.error( + "Video width mismatch. Expected = {}, actual = {}".format( + expected_width, width + ) + ) + all_passed = False + if height != expected_height: + logger.error( + "Video height mismatch. Expected = {}, actual = {}".format( + expected_height, height + ) + ) + all_passed = False + + return all_passed + + def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: """Gets all the fixated(1 value per property) caps from a Gst.Caps object @@ -541,14 +595,21 @@ def main(): cap_struct.get_int("height").value, ) elif args.subcommand == "record-video": + file_path = "{}/video_dev_{}_cap_{}.mkv".format( + args.path, dev_i, cap_i + ) record_video( dev_element, - file_path="{}/video_dev_{}_cap_{}.mkv".format( - args.path, dev_i, cap_i - ), + file_path=file_path, caps=capability, record_n_seconds=args.seconds, ) + validate_video_info( + file_path, + expected_duration_seconds=args.seconds, + expected_width=cap_struct.get_int("width").value, + expected_height=cap_struct.get_int("height").value, + ) logging.info("[ OK ] All done!") @@ -559,6 +620,8 @@ def main(): Gst.init(None) Gtk.init([]) + GstPbutils.pb_utils_init() + main() if old_env: From 41e90c058960e8bbc066e20c0ac1938c889a9a57 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:25:58 +0800 Subject: [PATCH 22/79] fix: add a cap for get_all_caps --- providers/base/bin/camera_test_auto_gst_source.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index f810fdbca5..af95baaee3 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -97,13 +97,13 @@ def validate_video_info( return all_passed -def get_all_fixated_caps(caps: Gst.Caps) -> T.List[Gst.Caps]: +def get_all_fixated_caps(caps: Gst.Caps, maximum=100) -> T.List[Gst.Caps]: """Gets all the fixated(1 value per property) caps from a 
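
GstPbutils.Discoverer in isolation, with a placeholder path. Note that discover_uri needs a URI, and that the removeprefix call above discards its result (Python strings are immutable), so it does not change the path that is passed in.

    import gi
    gi.require_version("Gst", "1.0")
    gi.require_version("GstPbutils", "1.0")
    from gi.repository import Gst, GstPbutils

    Gst.init(None)
    discoverer = GstPbutils.Discoverer()
    # the path is a placeholder; the duration comes back in nanoseconds
    info = discoverer.discover_uri("file:///tmp/recording.avi")
    print("duration:", info.get_duration() / Gst.SECOND, "s")
    for stream in info.get_video_streams():
        print(stream.get_width(), "x", stream.get_height())
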
Gst.Caps object :param caps: A mixed Gst.Caps """ fixed_caps = [] - while not caps.is_fixed(): + while not caps.is_fixed() and len(fixed_caps) < maximum: # keep fixating it until it's fixed fixed_cap = caps.fixate() fixed_caps.append(fixed_cap) @@ -476,7 +476,6 @@ def record_video( str_elements[1] = "" else: # decodebin doesn't work with video/x-raw - # videorate doesn't work if source-caps was not created str_elements[0] = str_elements[1] = "" head_elem_name = "converter" @@ -566,11 +565,14 @@ def main(): for dev_i, device in enumerate(devices): dev_element = device.create_element() all_fixed_caps = get_all_fixated_caps(device.get_caps()) + + logging.info("Testing device {}/{}", dev_i + 1, len(devices)) logging.info( - "---- Test for this device may take {} seconds for {} caps. ----".format( + "Test for this device may take {} seconds for {} caps.".format( len(all_fixed_caps) * seconds_per_pipeline, len(all_fixed_caps) ) ) + for cap_i, capability in enumerate(all_fixed_caps): cap_struct = capability.get_structure(0) if args.subcommand == "take-photo": From 19f6c5f29fea3b9fa4734f3f64947f81c68ede04 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:26:55 +0800 Subject: [PATCH 23/79] fix: remove print --- providers/base/bin/camera_test_auto_gst_source.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index af95baaee3..a9b812009a 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -64,11 +64,6 @@ def validate_video_info( all_passed = True - print( - duration, - expected_duration_seconds * 10**9, - duration - expected_duration_seconds * 10**9, - ) if ( abs(duration - expected_duration_seconds * 10**9) > duration_tolerance_seconds * 10**9 From 9e8cdf30158664cfeb5571db704bbbf4e66a5d10 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:32:16 +0800 Subject: [PATCH 24/79] fix: logger bad args --- providers/base/bin/camera_test_auto_gst_source.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index a9b812009a..ce6bd8711d 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -561,7 +561,7 @@ def main(): dev_element = device.create_element() all_fixed_caps = get_all_fixated_caps(device.get_caps()) - logging.info("Testing device {}/{}", dev_i + 1, len(devices)) + logging.info("Testing device {}/{}".format(dev_i + 1, len(devices))) logging.info( "Test for this device may take {} seconds for {} caps.".format( len(all_fixed_caps) * seconds_per_pipeline, len(all_fixed_caps) From f75e28d71528a7f5d2f67761f2ccc8613f7597ce Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:34:29 +0800 Subject: [PATCH 25/79] fix: consistent param style --- providers/base/bin/camera_test_auto_gst_source.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index ce6bd8711d..a38769f741 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -111,7 +111,7 @@ def 
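
What the fixate/subtract loop produces, shown on a hand-written caps string (the values are made up):

    caps = Gst.Caps.from_string(
        "video/x-raw, width=(int){640, 1280}, height=(int){480, 720}"
    )
    fixed = []
    while not caps.is_fixed() and len(fixed) < 100:
        one = caps.fixate()        # pick a single concrete combination
        fixed.append(one)
        caps = caps.subtract(one)  # remove it and keep going
    if caps.is_fixed():
        fixed.append(caps)
    # expect the width x height combinations to be enumerated individually
    print(len(fixed))
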
get_all_fixated_caps(caps: Gst.Caps, maximum=100) -> T.List[Gst.Caps]: def validate_image_dimensions( - image_file_path: str, expected_width: int, expected_height: int + image_file_path: str, *, expected_width: int, expected_height: int ) -> bool: image = PIL.Image.open(image_file_path) if image.width != expected_width: @@ -588,8 +588,8 @@ def main(): ) validate_image_dimensions( file_path, - cap_struct.get_int("width").value, - cap_struct.get_int("height").value, + expected_width=cap_struct.get_int("width").value, + expected_height=cap_struct.get_int("height").value, ) elif args.subcommand == "record-video": file_path = "{}/video_dev_{}_cap_{}.mkv".format( From f2db3b98a3768de1af90428eb55db0fdf3378894 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 12 Dec 2024 10:14:28 +0800 Subject: [PATCH 26/79] feat: skip validation flag --- .../base/bin/camera_test_auto_gst_source.py | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index a38769f741..82c9af860f 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -150,6 +150,11 @@ def parse_args(): help="Where to save the file. This should be a directory.", required=True, ) + photo_subparser.add_argument( + "--skip-validation", + action="store_true", + help="Skip image dimension validation", + ) video_subparser = subparser.add_parser("record-video") video_subparser.add_argument( @@ -166,6 +171,23 @@ def parse_args(): help="Where to save the file. This should be a directory.", required=True, ) + video_subparser.add_argument( + "-t", + "--tolerance", + type=float, + help=( + "Tolerance for validating the recording duration in seconds. " + "Ex. If the video is supposed to be 5s, tolerance is 0.1s, " + "then durations in [4.9s, 5.1s] inclusive will pass the validation" + "Default is 0.1s." 
+ ), + default=0.1, + ) + video_subparser.add_argument( + "--skip-validation", + action="store_true", + help="Skip video dimension & duration validation", + ) player_subparser = subparser.add_parser("play-video") player_subparser.add_argument( @@ -586,6 +608,10 @@ def main(): caps=capability, file_path=file_path, ) + + if args.skip_validation: + continue + validate_image_dimensions( file_path, expected_width=cap_struct.get_int("width").value, @@ -601,6 +627,10 @@ def main(): caps=capability, record_n_seconds=args.seconds, ) + + if args.skip_validation: + continue + validate_video_info( file_path, expected_duration_seconds=args.seconds, From 770f87cb5aca07424aa652942fa354979b53f3ca Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 12 Dec 2024 12:29:18 +0800 Subject: [PATCH 27/79] fix: wait 1 more second for eos --- .../base/bin/camera_test_auto_gst_source.py | 111 +++++++++++------- 1 file changed, 70 insertions(+), 41 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 82c9af860f..0894a78b65 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -2,33 +2,29 @@ from enum import Enum import os -import time import PIL.Image import gi from argparse import ArgumentParser import typing as T import logging -import PIL - -# from checkbox_support.helpers.timeout import run_with_timeout +import time VoidFn = T.Callable[[], None] # takes nothing and returns nothing # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py +logger = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s %(levelname)s - %(message)s", datefmt="%m/%d %H:%M:%S", - level=logging.DEBUG, ) -logger = logging.getLogger(__name__) - +logger.setLevel(logging.DEBUG) gi.require_version("Gtk", "3.0") from gi.repository import Gtk # type: ignore gi.require_version("Gst", "1.0") -from gi.repository import Gst, GstPbutils # type: ignore +from gi.repository import Gst, GstPbutils # , GstPbutils # type: ignore gi.require_version("GLib", "2.0") from gi.repository import GLib # type: ignore @@ -68,9 +64,11 @@ def validate_video_info( abs(duration - expected_duration_seconds * 10**9) > duration_tolerance_seconds * 10**9 ): - logging.error( - "Duration not within tolerance. Got {}ns, but expected {} +- {}s".format( - duration, expected_duration_seconds, duration_tolerance_seconds + logger.error( + "Duration not within tolerance. Got {}s, but expected {} +- {}s".format( + round(duration / (10**9), 3), + expected_duration_seconds, + duration_tolerance_seconds, ) ) all_passed = False @@ -105,7 +103,15 @@ def get_all_fixated_caps(caps: Gst.Caps, maximum=100) -> T.List[Gst.Caps]: caps = caps.subtract(fixed_cap) # this is useful to get around missing types # in default gst python binding on ubuntu, like Gst.Fraction - fixed_caps.append(caps) # append the final one + + if caps.is_fixed(): + fixed_caps.append(caps) # append the final one + else: + logger.error( + "Maximum cap amount reached: {}. Skipping the rest.".format( + maximum + ) + ) return fixed_caps @@ -155,6 +161,14 @@ def parse_args(): action="store_true", help="Skip image dimension validation", ) + photo_subparser.add_argument( + "--max-caps", + type=int, + help="Set the maximum number of capabilities to check for each device. " + "Default = 100. 
" + "This is useful for restraining the number of caps on devices " + 'that have "continuous" caps.', + ) video_subparser = subparser.add_parser("record-video") video_subparser.add_argument( @@ -250,7 +264,8 @@ def elem_to_str(element: Gst.Element) -> str: def run_pipeline( pipeline: Gst.Pipeline, - run_n_seconds: int = -1, + run_n_seconds: T.Optional[int] = None, + force_kill_timeout: int = 300, intermediate_calls: T.List[T.Tuple[int, VoidFn]] = [], ): """Run the pipeline @@ -258,7 +273,10 @@ def run_pipeline( :param pipeline: Gst.Pipeline. All element creation/linking steps should be done by this point :param run_n_seconds: how long until we stop the main loop. - - If -1, only wait for EOS + - If None, only wait for EOS. + :param force_kill_timeout: how long until a force kill is triggered. + - If None and run_n_seconds != None, then force_kill = run_n_seconds * 2 + - If != None and run_n_seconds != None, an error is raised if force kill <= run_n_seconds :param intermedate_calls: a list of functions to call while the pipeline is running. list[(() -> None, int)], where 2nd elem is the number of seconds to wait RELATIVE to @@ -288,15 +306,34 @@ def graceful_quit(): eos_handled = pipeline.send_event(Gst.Event.new_eos()) if not eos_handled: - logging.error("EOS was not handled by the pipeline. ") + logger.error("EOS was not handled by the pipeline. ") pipeline.set_state(Gst.State.NULL) # force stop main_loop.quit() return - bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS) + if not force_kill_timeout and run_n_seconds: + bus_pop_timeout = run_n_seconds * 2 + else: + bus_pop_timeout = force_kill_timeout + + time.sleep(1) + + # it's possible to immediately pop None (got EOS, but message is None) + # so wait 1 second for the message to be constructed before popping + eos_msg = bus.timed_pop_filtered(bus_pop_timeout, Gst.MessageType.EOS) pipeline.set_state(Gst.State.NULL) main_loop.quit() + if eos_msg is None: + # have to force system exit here, + # GLib.Mainloop overrides the sys.excepthook + raise SystemExit( + "Did not receive EOS after {} seconds. ".format( + bus_pop_timeout + ) + + "Pipeline likely hanged." + ) + def quit(): logger.debug("Setting state to NULL.") pipeline.set_state(Gst.State.NULL) @@ -308,7 +345,7 @@ def quit(): pipeline.unref() for delay, call in intermediate_calls: - assert run_n_seconds == -1 or delay < run_n_seconds, ( + assert run_n_seconds is None or delay < run_n_seconds, ( "Delay for each call must be smaller than total run seconds, " " (Got delay = {}, run_n_seconds = {})".format( delay, run_n_seconds @@ -316,7 +353,7 @@ def quit(): ) GLib.timeout_add_seconds(delay, call) - if run_n_seconds == -1: + if run_n_seconds is None: bus.add_signal_watch() bus.connect( "message", @@ -367,7 +404,7 @@ def display_viewfinder( assert head assert source.link(head) - logging.info( + logger.info( "[ OK ] Created pipeline for viewfinder: {} ! {}".format( elem_to_str(source), partial_pipeline ) @@ -444,7 +481,7 @@ def open_valve(): logging.debug("Opening valve!") valve.set_property("drop", False) - logging.info( + logger.info( "Created photo pipeline with {} second delay.".format(delay_seconds) + '"{} ! 
{}"'.format(elem_to_str(source), partial) ) @@ -453,10 +490,10 @@ def open_valve(): run_pipeline( pipeline, delay_seconds + 1, # workaround for now, weird problem with ref count - [(delay_seconds, open_valve)], + intermediate_calls=[(delay_seconds, open_valve)], ) - logging.info("[ OK ] Photo was saved to {}".format(file_path)) + logger.info("[ OK ] Photo was saved to {}".format(file_path)) def record_video( @@ -511,17 +548,17 @@ def record_video( head_elem ), "Could not link source element to {}".format(head_elem) - logging.info( + logger.info( "[ OK ] Created video pipeline to record {} seconds".format( record_n_seconds ) ) - logging.info("{} ! {}".format(elem_to_str(source), partial)) + logger.info("{} ! {}".format(elem_to_str(source), partial)) logging.debug("Setting playing state") run_pipeline(pipeline, record_n_seconds) - logging.info( + logger.info( "[ OK ] Video for this capability: " + "{}".format(caps.to_string() if caps else "[device default]") + " was saved to {}".format(file_path) @@ -543,7 +580,7 @@ def main(): print(args) if os.getuid() == 0: - logging.warning( + logger.warning( "Running this script as root. " "This may lead to different results than running as regular user." ) @@ -559,7 +596,7 @@ def main(): devices = get_devices() if len(devices) == 0: - logging.error( + logger.error( "GStreamer cannot find any cameras on this device. " "If you know a camera element exists, then it did not implement " "Gst.DeviceProvider to make itself visible to GStreamer " @@ -570,7 +607,7 @@ def main(): seconds_per_pipeline = ( args.wait_seconds if args.subcommand == "take-photo" else args.seconds ) - logging.info("Found {} cameras!".format(len(devices))) + logger.info("Found {} cameras!".format(len(devices))) print( '[ HINT ] For debugging, remove the "valve" element to get a pipeline', "that can be run with gst-launch-1.0", @@ -583,8 +620,8 @@ def main(): dev_element = device.create_element() all_fixed_caps = get_all_fixated_caps(device.get_caps()) - logging.info("Testing device {}/{}".format(dev_i + 1, len(devices))) - logging.info( + logger.info("Testing device {}/{}".format(dev_i + 1, len(devices))) + logger.info( "Test for this device may take {} seconds for {} caps.".format( len(all_fixed_caps) * seconds_per_pipeline, len(all_fixed_caps) ) @@ -593,7 +630,7 @@ def main(): for cap_i, capability in enumerate(all_fixed_caps): cap_struct = capability.get_structure(0) if args.subcommand == "take-photo": - logging.info( + logger.info( "Taking a photo with capability: " + '"{}"'.format(capability.to_string()) + "for device: " @@ -638,20 +675,12 @@ def main(): expected_height=cap_struct.get_int("height").value, ) - logging.info("[ OK ] All done!") + logger.info("[ OK ] All done!") if __name__ == "__main__": - old_env = os.environ.get("GST_DEBUG", None) - os.environ["GST_DEBUG"] = "2" # error and warnings - Gst.init(None) - Gtk.init([]) GstPbutils.pb_utils_init() + Gtk.init([]) main() - - if old_env: - os.environ["GST_DEBUG"] = old_env - else: - del os.environ["GST_DEBUG"] From 78135d9d595f4e119c72e04bdb44e48fc23fd27e Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 12 Dec 2024 12:40:20 +0800 Subject: [PATCH 28/79] style:formatting --- providers/base/bin/camera_test_auto_gst_source.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 0894a78b65..4af1adcebc 100755 --- 
a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -395,7 +395,7 @@ def display_viewfinder( do that before calling this function :param show_n_seconds: number of seconds to keep the viewfinder on screen """ - + partial_pipeline = " ! ".join(["videoconvert name=head", "autovideosink"]) pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline head = pipeline.get_by_name("head") @@ -482,7 +482,7 @@ def open_valve(): valve.set_property("drop", False) logger.info( - "Created photo pipeline with {} second delay.".format(delay_seconds) + "Created photo pipeline with {} second delay. ".format(delay_seconds) + '"{} ! {}"'.format(elem_to_str(source), partial) ) logging.debug("Setting playing state") From 945d6a5c163d7788d14d74638cdd5efa724099ca Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 16 Dec 2024 16:23:42 +0800 Subject: [PATCH 29/79] fix: add missing gi require --- providers/base/bin/camera_test_auto_gst_source.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 4af1adcebc..d4cece16ce 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -13,6 +13,9 @@ # https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py +# detect intrange +# http://gstreamer-devel.230.s1.nabble.com/gstreamer-python-binding-and-intRange-td969231.html#a969232 + logger = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s %(levelname)s - %(message)s", @@ -24,7 +27,8 @@ from gi.repository import Gtk # type: ignore gi.require_version("Gst", "1.0") -from gi.repository import Gst, GstPbutils # , GstPbutils # type: ignore +gi.require_version("GstPbutils", "1.0") +from gi.repository import Gst, GstPbutils # type: ignore gi.require_version("GLib", "2.0") from gi.repository import GLib # type: ignore @@ -395,7 +399,7 @@ def display_viewfinder( do that before calling this function :param show_n_seconds: number of seconds to keep the viewfinder on screen """ - + partial_pipeline = " ! 
".join(["videoconvert name=head", "autovideosink"]) pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline head = pipeline.get_by_name("head") From d5b387b4e9be179f620177d2c4ece3c1120bddde Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Tue, 17 Dec 2024 22:34:03 +0800 Subject: [PATCH 30/79] feat: workaround for Gst.IntRange --- .../base/bin/camera_test_auto_gst_source.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index d4cece16ce..fbcfeb2ec0 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -94,6 +94,27 @@ def validate_video_info( return all_passed +def extract_int_range( + struct: Gst.Structure, prop_name: str +) -> tuple[int, int]: + """Bit of a hack to work around the missing Gst.IntRange type + + :param struct: structure whose prop_name property is a Gst.IntRange + :param prop_name: name of the property + :return: (low, high) integer tuple + """ + # the introspected class exists, but we can't construct it + assert struct.has_field_typed(prop_name, Gst.IntRange) + INT32_MIN = -2147483648 + INT32_MAX = 2147483647 + low = struct.copy() # type: Gst.Structure + high = struct.copy() # type: Gst.Structure + low.fixate_field_nearest_int(prop_name, INT32_MIN) + high.fixate_field_nearest_int(prop_name, INT32_MAX) + + return low.get_int(prop_name)[1], high.get_int(prop_name)[1] + + def get_all_fixated_caps(caps: Gst.Caps, maximum=100) -> T.List[Gst.Caps]: """Gets all the fixated(1 value per property) caps from a Gst.Caps object From 27b50e3ec0832009047b2be14d5c8d0b502a372a Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 11:23:49 +0800 Subject: [PATCH 31/79] feat: add caps resolver --- .../base/bin/camera_test_auto_gst_source.py | 329 ++++++++++++------ 1 file changed, 217 insertions(+), 112 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index fbcfeb2ec0..edffa28a7b 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -23,6 +23,8 @@ ) logger.setLevel(logging.DEBUG) +from gi.repository import GObject # type: ignore + gi.require_version("Gtk", "3.0") from gi.repository import Gtk # type: ignore @@ -45,120 +47,219 @@ def get_devices() -> T.List[Gst.Device]: return devices -def validate_video_info( - video_file_path: str, - *, - expected_width: int, - expected_height: int, - expected_duration_seconds: int, - duration_tolerance_seconds=0.1, -) -> bool: - discoverer = GstPbutils.Discoverer() - - video_file_path.removeprefix("/") - info = discoverer.discover_uri("file://" + video_file_path) - duration = info.get_duration() # type: int # This is in nanoseconds - video_track = info.get_stream_info().get_streams()[0] - width = video_track.get_width() - height = video_track.get_height() - - all_passed = True - - if ( - abs(duration - expected_duration_seconds * 10**9) - > duration_tolerance_seconds * 10**9 - ): - logger.error( - "Duration not within tolerance. Got {}s, but expected {} +- {}s".format( - round(duration / (10**9), 3), - expected_duration_seconds, - duration_tolerance_seconds, - ) - ) - all_passed = False - if width != expected_width: - logger.error( - "Video width mismatch. 
Expected = {}, actual = {}".format( - expected_width, width - ) - ) - all_passed = False - if height != expected_height: - logger.error( - "Video height mismatch. Expected = {}, actual = {}".format( - expected_height, height - ) - ) - all_passed = False - - return all_passed +class MediaValidator: + @staticmethod + def validate_image_dimensions( + image_file_path: str, + *, + expected_width: int, + expected_height: int, + ) -> bool: + image = PIL.Image.open(image_file_path) + passed = True -def extract_int_range( - struct: Gst.Structure, prop_name: str -) -> tuple[int, int]: - """Bit of a hack to work around the missing Gst.IntRange type + if image.width != expected_width: + passed = False + logger.error( + "Image width mismatch. Expected = {}, actual = {}".format( + expected_width, image.width + ) + ) + if image.height != expected_height: + passed = False + logger.error( + "Image height mismatch. Expected = {}, actual = {}".format( + expected_height, image.height + ) + ) - :param struct: structure whose prop_name property is a Gst.IntRange - :param prop_name: name of the property - :return: (low, high) integer tuple - """ - # the introspected class exists, but we can't construct it - assert struct.has_field_typed(prop_name, Gst.IntRange) - INT32_MIN = -2147483648 - INT32_MAX = 2147483647 - low = struct.copy() # type: Gst.Structure - high = struct.copy() # type: Gst.Structure - low.fixate_field_nearest_int(prop_name, INT32_MIN) - high.fixate_field_nearest_int(prop_name, INT32_MAX) + return passed - return low.get_int(prop_name)[1], high.get_int(prop_name)[1] + @staticmethod + def validate_video_info( + video_file_path: str, + *, + expected_width: int, + expected_height: int, + expected_duration_seconds: int, + duration_tolerance_seconds=0.1, + ) -> bool: + discoverer = GstPbutils.Discoverer() + video_file_path.removeprefix("/") + info = discoverer.discover_uri("file://" + video_file_path) + duration = info.get_duration() # type: int # This is in nanoseconds + video_track = info.get_stream_info().get_streams()[0] + width = video_track.get_width() + height = video_track.get_height() -def get_all_fixated_caps(caps: Gst.Caps, maximum=100) -> T.List[Gst.Caps]: - """Gets all the fixated(1 value per property) caps from a Gst.Caps object + passed = True - :param caps: A mixed Gst.Caps - """ - fixed_caps = [] - while not caps.is_fixed() and len(fixed_caps) < maximum: - # keep fixating it until it's fixed - fixed_cap = caps.fixate() - fixed_caps.append(fixed_cap) - caps = caps.subtract(fixed_cap) - # this is useful to get around missing types - # in default gst python binding on ubuntu, like Gst.Fraction - - if caps.is_fixed(): - fixed_caps.append(caps) # append the final one - else: - logger.error( - "Maximum cap amount reached: {}. Skipping the rest.".format( - maximum + if ( + abs(duration - expected_duration_seconds * 10**9) + > duration_tolerance_seconds * 10**9 + ): + logger.error( + "Duration not within tolerance. Got {}s, but expected {} +- {}s".format( + round(duration / (10**9), 3), + expected_duration_seconds, + duration_tolerance_seconds, + ) ) - ) + passed = False + if width != expected_width: + logger.error( + "Video width mismatch. Expected = {}, actual = {}".format( + expected_width, width + ) + ) + passed = False + if height != expected_height: + logger.error( + "Video height mismatch. 
Expected = {}, actual = {}".format( + expected_height, height + ) + ) + passed = False - return fixed_caps + return passed -def validate_image_dimensions( - image_file_path: str, *, expected_width: int, expected_height: int -) -> bool: - image = PIL.Image.open(image_file_path) - if image.width != expected_width: - logger.error( - "Image width mismatch. Expected = {}, actual = {}".format( - expected_width, image.width - ) - ) - if image.height != expected_height: - logger.error( - "Image height mismatch. Expected = {}, actual = {}".format( - expected_height, image.height - ) +class CapsResolver: + INT32_MIN = -2147483648 + INT32_MAX = 2147483647 + + # (top, bottom) or (numerator, denominator) + FractionTuple = tuple[int, int] + # Used when we encounter IntRange or FractionRange types + # Simply fixating the caps will produce too many caps, + # so we restrict to these common ones + RangeResolveMethod = T.Literal["remap", "limit"] + RANGE_REMAP = { + "width": [640, 1280, 1920, 2560, 3840], + "height": [480, 720, 1080, 1440, 2160], + "framerate": [(15, 1), (30, 1), (60, 1)], # 15fpx, 30fps, 60fps + } + + def extract_fraction_range( + self, struct: Gst.Structure, prop_name: str + ) -> tuple[FractionTuple, FractionTuple]: + """Extracts (low, high) fraction range from a Gst.Structure + + :param struct: structure whose prop_name is a Gst.FractionRange + :param prop_name: name of the property + :return: (low, high) fraction tuple + - NOTE: low is defined as having a smaller numerator + """ + assert struct.has_field_typed(prop_name, Gst.FractionRange) + low = struct.copy() # type: Gst.Structure + high = struct.copy() # type: Gst.Structure + low.fixate_field_nearest_fraction(prop_name, 0, 1) + high.fixate_field_nearest_fraction(prop_name, self.INT32_MAX, 1) + + return ( + low.get_fraction(prop_name)[1:], + high.get_fraction(prop_name)[1:], ) - return image.width != expected_width and image.height == expected_height + def extract_int_range( + self, struct: Gst.Structure, prop_name: str + ) -> tuple[int, int]: + """Bit of a hack to work around the missing Gst.IntRange type + + :param struct: structure whose prop_name property is a Gst.IntRange + :param prop_name: name of the property + :return: (low, high) integer tuple + """ + # the introspected class exists, but we can't construct it + assert struct.has_field_typed(prop_name, Gst.IntRange) + + low = struct.copy() # type: Gst.Structure + high = struct.copy() # type: Gst.Structure + low.fixate_field_nearest_int(prop_name, self.INT32_MIN) + high.fixate_field_nearest_int(prop_name, self.INT32_MAX) + + # get_int returns a (success, value) tuple + return low.get_int(prop_name)[1], high.get_int(prop_name)[1] + + def remap_range_to_list( + self, + prop: str, + low: T.Union[int, FractionTuple], + high: T.Union[int, FractionTuple], + ) -> GObject.ValueArray: + """Creates a GObject.ValueArray based on range + that can be used in Gst.Caps + + :param low: min value, inclusive + :param high: max value, inclusive + :return: ValueArray object. 
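(Illustrative example: with prop="width", low=600 and high=2000, the remap keeps 640, 1280 and 1920 from RANGE_REMAP["width"].)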
Usage: Caps.set_property(prop, value_array) + """ + out = GObject.ValueArray() + assert ( + prop in self.RANGE_REMAP + ), "Property {} does not have a remap definition".format(prop) + + for val in self.RANGE_REMAP[prop]: + # lt gt are defined as pairwise comparison on tuples + if val >= low and val <= high: + out.append(val) + + return out + + def get_all_fixated_caps( + self, + caps: Gst.Caps, + resolve_method: RangeResolveMethod, + limit: T.Optional[int] = None, + ) -> T.List[Gst.Caps]: + """Gets all the fixated(1 value per property) caps from a Gst.Caps object + + :param caps: a mixed Gst.Caps + :param resolve_method: how to resolve IntRange and FractionRange values + - Only applies to width, height, and framerate for now + - "remap" => picks out a set of common values within the original range + - "limit" => Use the caps.is_fixed while loop until we reaches limit + + :param limit: the limit to use for the "limit" resolver, ignored otherwise + :return: a list of fixed caps + """ + if caps.is_fixed(): + return [caps] + + fixed_caps = [] # type: list[Gst.Caps] + + print(f"\n{caps.get_size()}\n") + for i in range(caps.get_size()): + struct = caps.get_structure(i) + caps_i = Gst.Caps.from_string(struct.to_string()) # type: Gst.Caps + + if resolve_method == "remap": + for prop in self.RANGE_REMAP.keys(): + s_i = caps_i.get_structure(0) # type: Gst.Structure + + low, high = None, None + if s_i.has_field_typed(prop, Gst.IntRange): + low, high = self.extract_int_range(s_i, prop) + elif s_i.has_field_typed(prop, Gst.FractionRange): + low, high = self.extract_fraction_range(s_i, prop) + + if low is not None and high is not None: + s_i.set_value( + prop, + self.remap_range_to_list(prop, low, high), + ) + + while not caps_i.is_fixed(): + fixed_cap = caps_i.fixate() + fixed_caps.append(fixed_cap) + caps_i = caps_i.subtract(fixed_cap) + + if caps_i.is_fixed(): + fixed_caps.append(caps_i) + + return fixed_caps def parse_args(): @@ -218,9 +319,9 @@ def parse_args(): "Tolerance for validating the recording duration in seconds. " "Ex. If the video is supposed to be 5s, tolerance is 0.1s, " "then durations in [4.9s, 5.1s] inclusive will pass the validation" - "Default is 0.1s." + "Default is 0.5s." ), - default=0.1, + default=0.5, ) video_subparser.add_argument( "--skip-validation", @@ -260,7 +361,7 @@ def elem_to_str(element: Gst.Element) -> str: try: prop_value = element.get_property(prop.name) except: - logger.info( + logger.debug( "Property {} is unreadable in {}".format( prop.name, element_name ) # not every property is readable, ignore unreadable ones @@ -462,7 +563,7 @@ def take_photo( "videoconvert name=converter", # 2 "valve name=photo-valve drop=True", # 4 "jpegenc", # 3 - "filesink location={}".format(file_path), # 5 + "multifilesink location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -538,7 +639,7 @@ def record_video( "videoconvert name=converter", # 2 "jpegenc", # 3, avoid massiave uncompressed videos "matroskamux", # 4 - "filesink location={}".format(file_path), # 5 + "multifilesink location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -593,7 +694,7 @@ def record_video( videotestsrc num-buffers=120 ! queue ! encodebin profile="video/quicktime,variant=iso:video/x-h264" ! - filesink location=video.mp4 + multifilesink location=video.mp4 """ """decode filesrc location=video.mp4 ! decodebin ! 
autovideosink @@ -615,7 +716,7 @@ def main(): return if not os.path.isdir(args.path): - # must validate early, filesink does not check if the path exists + # must validate early, multifilesink does not check if the path exists raise FileNotFoundError('Path "{}" does not exist'.format(args.path)) devices = get_devices() @@ -635,7 +736,7 @@ def main(): logger.info("Found {} cameras!".format(len(devices))) print( '[ HINT ] For debugging, remove the "valve" element to get a pipeline', - "that can be run with gst-launch-1.0", + 'that can be run with "gst-launch-1.0".', "Also keep the pipeline running for {} seconds".format( seconds_per_pipeline ), @@ -643,7 +744,10 @@ def main(): for dev_i, device in enumerate(devices): dev_element = device.create_element() - all_fixed_caps = get_all_fixated_caps(device.get_caps()) + resolver = CapsResolver() + all_fixed_caps = resolver.get_all_fixated_caps( + device.get_caps(), "remap" + ) logger.info("Testing device {}/{}".format(dev_i + 1, len(devices))) logger.info( @@ -674,7 +778,7 @@ def main(): if args.skip_validation: continue - validate_image_dimensions( + MediaValidator.validate_image_dimensions( file_path, expected_width=cap_struct.get_int("width").value, expected_height=cap_struct.get_int("height").value, @@ -693,11 +797,12 @@ def main(): if args.skip_validation: continue - validate_video_info( + MediaValidator.validate_video_info( file_path, expected_duration_seconds=args.seconds, expected_width=cap_struct.get_int("width").value, expected_height=cap_struct.get_int("height").value, + duration_tolerance_seconds=args.tolerance, ) logger.info("[ OK ] All done!") From 455b86d59dbe2854d840c4c9fe3fca895263480b Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 12:05:33 +0800 Subject: [PATCH 32/79] fix: handle GValueArray to GstValueList conversion --- providers/base/bin/camera_test_auto_gst_source.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index edffa28a7b..f0181957fd 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -246,13 +246,17 @@ def get_all_fixated_caps( low, high = self.extract_fraction_range(s_i, prop) if low is not None and high is not None: - s_i.set_value( + s_i.set_list( prop, self.remap_range_to_list(prop, low, high), ) - while not caps_i.is_fixed(): - fixed_cap = caps_i.fixate() + caps_i = Gst.Caps.from_string(s_i.to_string()) + + while not caps_i.is_fixed() and not caps_i.is_empty(): + fixed_cap = caps_i.fixate() # type: Gst.Caps + if fixed_cap.get_structure(0).get_name() == "video/x-bayer": + continue fixed_caps.append(fixed_cap) caps_i = caps_i.subtract(fixed_cap) @@ -812,5 +816,5 @@ def main(): Gst.init(None) GstPbutils.pb_utils_init() Gtk.init([]) - + main() From 6fd2e0f7c22f41e339651a6089b83d9dd45f2ce8 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 12:52:48 +0800 Subject: [PATCH 33/79] fix: workaround for list[Gst.Fraction] --- .../base/bin/camera_test_auto_gst_source.py | 52 +++++++++++++++---- 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index f0181957fd..cbb16d746b 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ 
b/providers/base/bin/camera_test_auto_gst_source.py @@ -144,7 +144,7 @@ class CapsResolver: def extract_fraction_range( self, struct: Gst.Structure, prop_name: str - ) -> tuple[FractionTuple, FractionTuple]: + ) -> T.Tuple[FractionTuple, FractionTuple]: """Extracts (low, high) fraction range from a Gst.Structure :param struct: structure whose prop_name is a Gst.FractionRange @@ -165,7 +165,7 @@ def extract_fraction_range( def extract_int_range( self, struct: Gst.Structure, prop_name: str - ) -> tuple[int, int]: + ) -> T.Tuple[int, int]: """Bit of a hack to work around the missing Gst.IntRange type :param struct: structure whose prop_name property is a Gst.IntRange @@ -183,12 +183,20 @@ def extract_int_range( # get_int returns a (success, value) tuple return low.get_int(prop_name)[1], high.get_int(prop_name)[1] + @T.overload + def remap_range_to_list( + self, prop: str, low: int, high: int + ) -> T.List[int]: ... + @T.overload + def remap_range_to_list( + self, prop: str, low: FractionTuple, high: FractionTuple + ) -> T.List[FractionTuple]: ... def remap_range_to_list( self, prop: str, low: T.Union[int, FractionTuple], high: T.Union[int, FractionTuple], - ) -> GObject.ValueArray: + ) -> T.List: """Creates a GObject.ValueArray based on range that can be used in Gst.Caps @@ -196,7 +204,7 @@ def remap_range_to_list( :param high: max value, inclusive :return: ValueArray object. Usage: Caps.set_property(prop, value_array) """ - out = GObject.ValueArray() + out = [] assert ( prop in self.RANGE_REMAP ), "Property {} does not have a remap definition".format(prop) @@ -208,6 +216,12 @@ def remap_range_to_list( return out + def list_to_gobject_value_array(self, l: T.List): + out = GObject.ValueArray() + for e in l: + out.append(e) + return out # this does not guarantee that out has a sensible value + def get_all_fixated_caps( self, caps: Gst.Caps, @@ -239,16 +253,36 @@ def get_all_fixated_caps( for prop in self.RANGE_REMAP.keys(): s_i = caps_i.get_structure(0) # type: Gst.Structure - low, high = None, None + finite_list = None # type GObject.ValueArray if s_i.has_field_typed(prop, Gst.IntRange): low, high = self.extract_int_range(s_i, prop) + finite_list = self.list_to_gobject_value_array( + self.remap_range_to_list(prop, low, high) + ) elif s_i.has_field_typed(prop, Gst.FractionRange): low, high = self.extract_fraction_range(s_i, prop) - - if low is not None and high is not None: + fraction_list = self.remap_range_to_list( + prop, low, high + ) + # workaround missing Gst.Fraction + # we can't directly create fraction objects + # but we can create a struct from str, then access it + temp = Gst.Structure.from_string( + "temp, {}={{{}}}".format( + prop, + ",".join( + "{}/{}".format(f[0], f[1]) + for f in fraction_list + ), + ) + )[0] + finite_list = temp.get_list(prop)[1] + + if finite_list is not None: + assert finite_list.n_values != 0 s_i.set_list( prop, - self.remap_range_to_list(prop, low, high), + finite_list, ) caps_i = Gst.Caps.from_string(s_i.to_string()) @@ -816,5 +850,5 @@ def main(): Gst.init(None) GstPbutils.pb_utils_init() Gtk.init([]) - + main() From cf73e678ffb892124a0c58b546bf091d3bd240b6 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:08:47 +0800 Subject: [PATCH 34/79] fix: import Gtk only when necessary --- .../base/bin/camera_test_auto_gst_source.py | 41 +++++++++++++++---- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py 
b/providers/base/bin/camera_test_auto_gst_source.py index cbb16d746b..a878f71699 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -25,8 +25,7 @@ from gi.repository import GObject # type: ignore -gi.require_version("Gtk", "3.0") -from gi.repository import Gtk # type: ignore +Gtk = None gi.require_version("Gst", "1.0") gi.require_version("GstPbutils", "1.0") @@ -367,6 +366,15 @@ def parse_args(): help="Skip video dimension & duration validation", ) + viewfinder_subparser = subparser.add_parser("show-viewfinder") + viewfinder_subparser.add_argument( + "-s", + "--seconds", + type=int, + help="Show the viewfinder for n seconds", + default=10, + ) + player_subparser = subparser.add_parser("play-video") player_subparser.add_argument( "-p", @@ -534,6 +542,13 @@ def quit(): def play_video(filepath: str): + global Gtk + if not Gtk: + gi.require_version("Gtk", "3.0") + from gi.repository import Gtk # type: ignore + + Gtk.init([]) + pipeline = Gst.parse_launch( " ! ".join( [ @@ -559,6 +574,12 @@ def display_viewfinder( do that before calling this function :param show_n_seconds: number of seconds to keep the viewfinder on screen """ + global Gtk + if not Gtk: + gi.require_version("Gtk", "3.0") + from gi.repository import Gtk # type: ignore + + Gtk.init([]) partial_pipeline = " ! ".join(["videoconvert name=head", "autovideosink"]) pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline @@ -753,10 +774,6 @@ def main(): play_video(args.path) return - if not os.path.isdir(args.path): - # must validate early, multifilesink does not check if the path exists - raise FileNotFoundError('Path "{}" does not exist'.format(args.path)) - devices = get_devices() if len(devices) == 0: @@ -782,6 +799,17 @@ def main(): for dev_i, device in enumerate(devices): dev_element = device.create_element() + + if args.subcommand == "show-viewfinder": + display_viewfinder(dev_element, show_n_seconds=args.seconds) + continue + + if not os.path.isdir(args.path): + # must validate early, multifilesink does not check if the path exists + raise FileNotFoundError( + 'Path "{}" does not exist'.format(args.path) + ) + resolver = CapsResolver() all_fixed_caps = resolver.get_all_fixated_caps( device.get_caps(), "remap" @@ -849,6 +877,5 @@ def main(): if __name__ == "__main__": Gst.init(None) GstPbutils.pb_utils_init() - Gtk.init([]) main() From 720d2ec9c476cf548fe18ec73a9a5a158a167ff9 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:09:39 +0800 Subject: [PATCH 35/79] style: consistent naming --- providers/base/bin/camera_test_auto_gst_source.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index a878f71699..293daaa7ba 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -562,7 +562,7 @@ def play_video(filepath: str): run_pipeline(pipeline) -def display_viewfinder( +def show_viewfinder( source: Gst.Element, *, show_n_seconds=5, @@ -801,7 +801,7 @@ def main(): dev_element = device.create_element() if args.subcommand == "show-viewfinder": - display_viewfinder(dev_element, show_n_seconds=args.seconds) + show_viewfinder(dev_element, show_n_seconds=args.seconds) continue if not os.path.isdir(args.path): From be88eac9c37d9d379d4c14b518d18c772f494fe0 Mon Sep 17 00:00:00 2001 From: Zhongning Li 
<60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:16:51 +0800 Subject: [PATCH 36/79] fix: bad timeout value --- providers/base/bin/camera_test_auto_gst_source.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 293daaa7ba..85c6257c4a 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -467,7 +467,7 @@ def start(): # it's possible to hang here if the source is broken # but the main thread will keep running, # so we check both an explicit fail and a hang - if pipeline.get_state(5)[0] == Gst.StateChangeReturn.FAILURE: + if pipeline.get_state(5 * 10**9)[0] != Gst.StateChangeReturn.SUCCESS: pipeline.set_state(Gst.State.NULL) raise RuntimeError("Failed to transition to playing state") From 7bdc1b0b2a392b161518b5bf168052e830d79387 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:17:18 +0800 Subject: [PATCH 37/79] fix: remove fstrings --- providers/base/bin/camera_test_auto_gst_source.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 85c6257c4a..b7aede5adc 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -243,7 +243,6 @@ def get_all_fixated_caps( fixed_caps = [] # type: list[Gst.Caps] - print(f"\n{caps.get_size()}\n") for i in range(caps.get_size()): struct = caps.get_structure(i) caps_i = Gst.Caps.from_string(struct.to_string()) # type: Gst.Caps @@ -537,7 +536,7 @@ def quit(): GLib.timeout_add_seconds(run_n_seconds, graceful_quit) start() - logger.info(f"[ OK ] Pipeline is playing!") + logger.info("[ OK ] Pipeline is playing!") main_loop.run() From f6fff3845cc509e64d925b46182fd28e813d168c Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 18 Dec 2024 14:48:47 +0800 Subject: [PATCH 38/79] feat: add fps validation, fix bad filesink --- .../base/bin/camera_test_auto_gst_source.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index b7aede5adc..aebe967641 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -82,16 +82,22 @@ def validate_video_info( expected_width: int, expected_height: int, expected_duration_seconds: int, + expected_fps: int, duration_tolerance_seconds=0.1, ) -> bool: discoverer = GstPbutils.Discoverer() - video_file_path.removeprefix("/") + video_file_path.lstrip("/") info = discoverer.discover_uri("file://" + video_file_path) duration = info.get_duration() # type: int # This is in nanoseconds - video_track = info.get_stream_info().get_streams()[0] - width = video_track.get_width() - height = video_track.get_height() + video_streams = info.get_video_streams() + if len(video_streams) == 0: + logger.error("{} has no video streams.".format(video_file_path)) + return False + + width = video_streams[0].get_width() + height = video_streams[0].get_height() + fps = video_streams[0].get_framerate_num() passed = True @@ -121,6 +127,13 @@ def validate_video_info( ) ) passed = False + if fps != expected_fps: + logger.error( + "Video FPS mismatch. 
Expected = {}fps, actual = {}fps".format( + expected_fps, fps + ) + ) + passed = False return passed @@ -697,7 +710,7 @@ def record_video( "videoconvert name=converter", # 2 "jpegenc", # 3, avoid massiave uncompressed videos "matroskamux", # 4 - "multifilesink location={}".format(file_path), # 5 + "filesink location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -868,6 +881,9 @@ def main(): expected_width=cap_struct.get_int("width").value, expected_height=cap_struct.get_int("height").value, duration_tolerance_seconds=args.tolerance, + expected_fps=cap_struct.get_fraction( + "framerate" + ).value_numerator, ) logger.info("[ OK ] All done!") From bebcd7b10ad1131d2adb5fb71fcdb3f0dc898714 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 19 Dec 2024 10:25:42 +0800 Subject: [PATCH 39/79] fix: only check the state of the source because multifilesink does not change state until the 1st buffer --- providers/base/bin/camera_test_auto_gst_source.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index aebe967641..972d398da4 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -479,7 +479,10 @@ def start(): # it's possible to hang here if the source is broken # but the main thread will keep running, # so we check both an explicit fail and a hang - if pipeline.get_state(5 * 10**9)[0] != Gst.StateChangeReturn.SUCCESS: + if ( + pipeline.get_child_by_index(0).get_state(5 * 10**9)[0] + != Gst.StateChangeReturn.SUCCESS + ): pipeline.set_state(Gst.State.NULL) raise RuntimeError("Failed to transition to playing state") From c9b5d3efe795eb0f5a478bcab55b76156d0fe2b0 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 19 Dec 2024 13:26:21 +0800 Subject: [PATCH 40/79] refactor: various cleanups --- .../base/bin/camera_test_auto_gst_source.py | 84 +++++++++++++------ 1 file changed, 60 insertions(+), 24 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 972d398da4..ceabb53de4 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -18,7 +18,7 @@ logger = logging.getLogger(__name__) logging.basicConfig( - format="%(asctime)s %(levelname)s - %(message)s", + format="%(asctime)s %(levelname)s - %(message)s\n", datefmt="%m/%d %H:%M:%S", ) logger.setLevel(logging.DEBUG) @@ -35,6 +35,13 @@ from gi.repository import GLib # type: ignore +ENCODING_PROFILES = { + "mp4_h264": "video/quicktime,variant=iso:video/x-h264", + "ogv_theora": "application/ogg:video/x-theora", + "webm_vp8": "video/webm:video/x-vp8", +} + + def get_devices() -> T.List[Gst.Device]: monitor = Gst.DeviceMonitor.new() # type: Gst.DeviceMonitor monitor.add_filter("Video/Source") @@ -95,9 +102,9 @@ def validate_video_info( logger.error("{} has no video streams.".format(video_file_path)) return False - width = video_streams[0].get_width() - height = video_streams[0].get_height() - fps = video_streams[0].get_framerate_num() + width = video_streams[0].get_width() # type: int + height = video_streams[0].get_height() # type: int + fps = video_streams[0].get_framerate_num() # type: int passed = True @@ -113,6 +120,7 @@ def validate_video_info( ) ) passed = False + if width != expected_width: logger.error( 
"Video width mismatch. Expected = {}, actual = {}".format( @@ -139,6 +147,7 @@ def validate_video_info( class CapsResolver: + INT32_MIN = -2147483648 INT32_MAX = 2147483647 @@ -151,7 +160,12 @@ class CapsResolver: RANGE_REMAP = { "width": [640, 1280, 1920, 2560, 3840], "height": [480, 720, 1080, 1440, 2160], - "framerate": [(15, 1), (30, 1), (60, 1)], # 15fpx, 30fps, 60fps + "framerate": [ + (15, 1), + (30, 1), + (60, 1), + (120, 1), + ], # 15fpx, 30fps, 60fps, 120fps } def extract_fraction_range( @@ -264,7 +278,7 @@ def get_all_fixated_caps( for prop in self.RANGE_REMAP.keys(): s_i = caps_i.get_structure(0) # type: Gst.Structure - finite_list = None # type GObject.ValueArray + finite_list = None # type: GObject.ValueArray | None if s_i.has_field_typed(prop, Gst.IntRange): low, high = self.extract_int_range(s_i, prop) finite_list = self.list_to_gobject_value_array( @@ -287,6 +301,8 @@ def get_all_fixated_caps( ), ) )[0] + # creates a struct of the form: temp, prop={30/1, 15/1} + # now we simply get the prop by name finite_list = temp.get_list(prop)[1] if finite_list is not None: @@ -320,6 +336,7 @@ def parse_args(): "-ws", "--wait-seconds", type=int, + dest="seconds", help="Number of seconds to keep the pipeline running " "before taking the photo. Default = 2.", default=2, @@ -377,6 +394,27 @@ def parse_args(): action="store_true", help="Skip video dimension & duration validation", ) + encoding_group = video_subparser.add_mutually_exclusive_group() + encoding_group.add_argument( + "--encoding", + type=str, + choices=["mp4", "ogv", "mpegts"], + help=( + "Choose an encoding preset with this option. " + "Make sure your system actually has the proper muxers and encoders " + "that supports this profile." + ), + ) + encoding_group.add_argument( + "--custom-encoding-string", + type=str, + help=( + "Directly set the encoding string for encodebin. " + "See GStreamer's GstEncodingProfiile page for examples. " + "The examples on that page are included in the --encoding option. " + "Only use this option if you have a custom encoding string." + ), + ) viewfinder_subparser = subparser.add_parser("show-viewfinder") viewfinder_subparser.add_argument( @@ -479,12 +517,13 @@ def start(): # it's possible to hang here if the source is broken # but the main thread will keep running, # so we check both an explicit fail and a hang - if ( - pipeline.get_child_by_index(0).get_state(5 * 10**9)[0] - != Gst.StateChangeReturn.SUCCESS - ): + source_state = pipeline.get_child_by_index(0).get_state(1 * 10**9)[0] + if source_state != Gst.StateChangeReturn.SUCCESS: pipeline.set_state(Gst.State.NULL) - raise RuntimeError("Failed to transition to playing state") + raise RuntimeError( + "Failed to transition to playing state. " + "Source is still in {} state after 1 second." + ) def graceful_quit(): logger.debug("Sending EOS.") @@ -560,8 +599,9 @@ def play_video(filepath: str): global Gtk if not Gtk: gi.require_version("Gtk", "3.0") - from gi.repository import Gtk # type: ignore + from gi.repository import Gtk as _Gtk # type: ignore + Gtk = _Gtk Gtk.init([]) pipeline = Gst.parse_launch( @@ -592,8 +632,9 @@ def show_viewfinder( global Gtk if not Gtk: gi.require_version("Gtk", "3.0") - from gi.repository import Gtk # type: ignore + from gi.repository import Gtk as _Gtk # type: ignore + Gtk = _Gtk Gtk.init([]) partial_pipeline = " ! 
".join(["videoconvert name=head", "autovideosink"]) @@ -678,14 +719,14 @@ def take_photo( ), "Could not link source element to {}".format(head_elem) def open_valve(): - logging.debug("Opening valve!") + logger.debug("Opening valve!") valve.set_property("drop", False) logger.info( "Created photo pipeline with {} second delay. ".format(delay_seconds) + '"{} ! {}"'.format(elem_to_str(source), partial) ) - logging.debug("Setting playing state") + logger.debug("Setting playing state") run_pipeline( pipeline, @@ -754,7 +795,7 @@ def record_video( ) ) logger.info("{} ! {}".format(elem_to_str(source), partial)) - logging.debug("Setting playing state") + logger.debug("Setting playing state") run_pipeline(pipeline, record_n_seconds) @@ -800,16 +841,11 @@ def main(): ) exit(1) - seconds_per_pipeline = ( - args.wait_seconds if args.subcommand == "take-photo" else args.seconds - ) logger.info("Found {} cameras!".format(len(devices))) print( '[ HINT ] For debugging, remove the "valve" element to get a pipeline', 'that can be run with "gst-launch-1.0".', - "Also keep the pipeline running for {} seconds".format( - seconds_per_pipeline - ), + "Also keep the pipeline running for {} seconds".format(args.seconds), ) for dev_i, device in enumerate(devices): @@ -833,7 +869,7 @@ def main(): logger.info("Testing device {}/{}".format(dev_i + 1, len(devices))) logger.info( "Test for this device may take {} seconds for {} caps.".format( - len(all_fixed_caps) * seconds_per_pipeline, len(all_fixed_caps) + len(all_fixed_caps) * args.seconds, len(all_fixed_caps) ) ) @@ -851,7 +887,7 @@ def main(): ) take_photo( dev_element, - delay_seconds=args.wait_seconds, + delay_seconds=args.seconds, caps=capability, file_path=file_path, ) From 84bf76a5fdece4044d8ca5671185e1c7d0719056 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 19 Dec 2024 14:38:33 +0800 Subject: [PATCH 41/79] style: make flake8 happy --- .../base/bin/camera_test_auto_gst_source.py | 67 +++++++++---------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index ceabb53de4..9280260023 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -23,16 +23,16 @@ ) logger.setLevel(logging.DEBUG) -from gi.repository import GObject # type: ignore +from gi.repository import GObject # type: ignore # noqa: E402 Gtk = None gi.require_version("Gst", "1.0") gi.require_version("GstPbutils", "1.0") -from gi.repository import Gst, GstPbutils # type: ignore +from gi.repository import Gst, GstPbutils # type: ignore # noqa: E402 gi.require_version("GLib", "2.0") -from gi.repository import GLib # type: ignore +from gi.repository import GLib # type: ignore # noqa: E402 ENCODING_PROFILES = { @@ -62,6 +62,9 @@ def validate_image_dimensions( expected_width: int, expected_height: int, ) -> bool: + assert os.path.isfile( + image_file_path + ), "Image file doesn't exist at {}".format(image_file_path) image = PIL.Image.open(image_file_path) passed = True @@ -113,7 +116,8 @@ def validate_video_info( > duration_tolerance_seconds * 10**9 ): logger.error( - "Duration not within tolerance. Got {}s, but expected {} +- {}s".format( + "Duration not within tolerance. 
" + "Got {}s, but expected {} +- {}s".format( round(duration / (10**9), 3), expected_duration_seconds, duration_tolerance_seconds, @@ -213,10 +217,12 @@ def extract_int_range( def remap_range_to_list( self, prop: str, low: int, high: int ) -> T.List[int]: ... + @T.overload def remap_range_to_list( self, prop: str, low: FractionTuple, high: FractionTuple ) -> T.List[FractionTuple]: ... + def remap_range_to_list( self, prop: str, @@ -242,19 +248,13 @@ def remap_range_to_list( return out - def list_to_gobject_value_array(self, l: T.List): - out = GObject.ValueArray() - for e in l: - out.append(e) - return out # this does not guarantee that out has a sensible value - def get_all_fixated_caps( self, caps: Gst.Caps, resolve_method: RangeResolveMethod, limit: T.Optional[int] = None, ) -> T.List[Gst.Caps]: - """Gets all the fixated(1 value per property) caps from a Gst.Caps object + """Gets all the fixated(1 value per property) caps from a Gst.Caps obj :param caps: a mixed Gst.Caps :param resolve_method: how to resolve IntRange and FractionRange values @@ -262,7 +262,8 @@ def get_all_fixated_caps( - "remap" => picks out a set of common values within the original range - "limit" => Use the caps.is_fixed while loop until we reaches limit - :param limit: the limit to use for the "limit" resolver, ignored otherwise + :param limit: the limit to use for the "limit" resolver + - ignored if resolve_method != "limit" :return: a list of fixed caps """ if caps.is_fixed(): @@ -281,9 +282,10 @@ def get_all_fixated_caps( finite_list = None # type: GObject.ValueArray | None if s_i.has_field_typed(prop, Gst.IntRange): low, high = self.extract_int_range(s_i, prop) - finite_list = self.list_to_gobject_value_array( - self.remap_range_to_list(prop, low, high) - ) + finite_list = GObject.ValueArray() + for elem in self.remap_range_to_list(prop, low, high): + finite_list.append(elem) + elif s_i.has_field_typed(prop, Gst.FractionRange): low, high = self.extract_fraction_range(s_i, prop) fraction_list = self.remap_range_to_list( @@ -316,8 +318,6 @@ def get_all_fixated_caps( while not caps_i.is_fixed() and not caps_i.is_empty(): fixed_cap = caps_i.fixate() # type: Gst.Caps - if fixed_cap.get_structure(0).get_name() == "video/x-bayer": - continue fixed_caps.append(fixed_cap) caps_i = caps_i.subtract(fixed_cap) @@ -356,7 +356,7 @@ def parse_args(): photo_subparser.add_argument( "--max-caps", type=int, - help="Set the maximum number of capabilities to check for each device. " + help="Set the maximum number of caps to check for each device. " "Default = 100. " "This is useful for restraining the number of caps on devices " 'that have "continuous" caps.', @@ -398,10 +398,10 @@ def parse_args(): encoding_group.add_argument( "--encoding", type=str, - choices=["mp4", "ogv", "mpegts"], + choices=list(ENCODING_PROFILES.keys()), help=( "Choose an encoding preset with this option. " - "Make sure your system actually has the proper muxers and encoders " + "Make sure your system actually has the proper elements " "that supports this profile." 
), ) @@ -456,23 +456,20 @@ def elem_to_str(element: Gst.Element) -> str: try: prop_value = element.get_property(prop.name) - except: + except Exception: logger.debug( - "Property {} is unreadable in {}".format( + "Property {} is unreadable in {}, ignored.".format( prop.name, element_name ) # not every property is readable, ignore unreadable ones ) continue - if ( - hasattr(prop_value, "to_string") - and type(prop_value.to_string).__name__ == "method" - ): + if hasattr(prop_value, "to_string") and callable(prop_value.to_string): # sometimes we have a nice to_string method, prioritize this prop_strings.append( "{}={}".format(prop.name, prop_value.to_string()) ) - elif type(prop_value) is Enum: + elif isinstance(prop, Enum): prop_strings.append("{}={}".format(prop.name, prop_value.value)) else: prop_strings.append( @@ -497,8 +494,9 @@ def run_pipeline( :param run_n_seconds: how long until we stop the main loop. - If None, only wait for EOS. :param force_kill_timeout: how long until a force kill is triggered. - - If None and run_n_seconds != None, then force_kill = run_n_seconds * 2 - - If != None and run_n_seconds != None, an error is raised if force kill <= run_n_seconds + - If None and run_n_seconds != None, then force_kill = run_n_seconds*2 + - If != None and run_n_seconds != None, an error is raised if + force kill <= run_n_seconds :param intermedate_calls: a list of functions to call while the pipeline is running. list[(() -> None, int)], where 2nd elem is the number of seconds to wait RELATIVE to @@ -542,15 +540,15 @@ def graceful_quit(): else: bus_pop_timeout = force_kill_timeout - time.sleep(1) - + before_pop_t = time.time() # it's possible to immediately pop None (got EOS, but message is None) - # so wait 1 second for the message to be constructed before popping + # so we also check if bus_pop_timeout has actually elapsed eos_msg = bus.timed_pop_filtered(bus_pop_timeout, Gst.MessageType.EOS) + after_pop_t = time.time() pipeline.set_state(Gst.State.NULL) main_loop.quit() - if eos_msg is None: + if eos_msg is None and after_pop_t - before_pop_t >= bus_pop_timeout: # have to force system exit here, # GLib.Mainloop overrides the sys.excepthook raise SystemExit( @@ -856,7 +854,8 @@ def main(): continue if not os.path.isdir(args.path): - # must validate early, multifilesink does not check if the path exists + # must validate early + # multifilesink does not check if the path exists raise FileNotFoundError( 'Path "{}" does not exist'.format(args.path) ) From f6aa1f4a45bfc331f4b56c7ea30682502ada6941 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 23 Dec 2024 15:07:46 +0800 Subject: [PATCH 42/79] style:spelling --- providers/base/bin/camera_test_auto_gst_source.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 9280260023..2eca4dca53 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -497,7 +497,7 @@ def run_pipeline( - If None and run_n_seconds != None, then force_kill = run_n_seconds*2 - If != None and run_n_seconds != None, an error is raised if force kill <= run_n_seconds - :param intermedate_calls: a list of functions to call + :param intermediate_calls: a list of functions to call while the pipeline is running. 
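(For example, intermediate_calls=[(2, open_valve)] would invoke open_valve about 2 seconds after the pipeline starts playing; open_valve is named purely for illustration here.)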
list[(() -> None, int)], where 2nd elem is the number of seconds to wait RELATIVE to when the pipeline started running From b7fe37af1051fb019528714cbae77311c04619b9 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 26 Dec 2024 11:58:54 +0800 Subject: [PATCH 43/79] style: couple default value with their description --- .../base/bin/camera_test_auto_gst_source.py | 54 +++++++++++++------ 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 2eca4dca53..cd720ee8f9 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -248,6 +248,14 @@ def remap_range_to_list( return out + @T.overload + def get_all_fixated_caps( + self, caps: Gst.Caps, resolve_method: T.Literal["remap"] + ): ... + @T.overload + def get_all_fixated_caps( + self, caps: Gst.Caps, resolve_method: T.Literal["limit"], limit: int + ): ... def get_all_fixated_caps( self, caps: Gst.Caps, @@ -317,6 +325,10 @@ def get_all_fixated_caps( caps_i = Gst.Caps.from_string(s_i.to_string()) while not caps_i.is_fixed() and not caps_i.is_empty(): + if resolve_method == "limit": + assert limit + if len(fixed_caps) >= limit: + break fixed_cap = caps_i.fixate() # type: Gst.Caps fixed_caps.append(fixed_cap) caps_i = caps_i.subtract(fixed_cap) @@ -332,14 +344,15 @@ def parse_args(): subparser = parser.add_subparsers(dest="subcommand", required=True) photo_subparser = subparser.add_parser("take-photo") + default_wait_seconds = 2 photo_subparser.add_argument( "-ws", "--wait-seconds", type=int, dest="seconds", help="Number of seconds to keep the pipeline running " - "before taking the photo. Default = 2.", - default=2, + "before taking the photo. Default = {}.".format(default_wait_seconds), + default=default_wait_seconds, ) photo_subparser.add_argument( "-p", @@ -353,22 +366,28 @@ def parse_args(): action="store_true", help="Skip image dimension validation", ) + default_max_caps = 100 photo_subparser.add_argument( "--max-caps", type=int, + default=default_max_caps, help="Set the maximum number of caps to check for each device. " - "Default = 100. " - "This is useful for restraining the number of caps on devices " - 'that have "continuous" caps.', + "Default = {}. ".format(default_max_caps) + + "This is useful for restraining the number of caps on devices " + 'that have "continuous" caps. ' + "Note that the caps are chosen by GStreamer's GstCaps.fixate()", ) video_subparser = subparser.add_parser("record-video") + default_record_seconds = 5 video_subparser.add_argument( "-s", "--seconds", type=int, - help="Number of seconds to record. Default = 5.", - default=5, + help="Number of seconds to record. Default = {}.".format( + default_record_seconds + ), + default=default_record_seconds, ) video_subparser.add_argument( "-p", @@ -377,6 +396,7 @@ def parse_args(): help="Where to save the file. This should be a directory.", required=True, ) + default_tolerance = 0.5 video_subparser.add_argument( "-t", "--tolerance", @@ -384,10 +404,10 @@ def parse_args(): help=( "Tolerance for validating the recording duration in seconds. " "Ex. If the video is supposed to be 5s, tolerance is 0.1s, " - "then durations in [4.9s, 5.1s] inclusive will pass the validation" - "Default is 0.5s." + "then 4.9s <= duration <= 5.1s will pass the validation. 
" + + "Default is {}s.".format(default_tolerance) ), - default=0.5, + default=default_tolerance, ) video_subparser.add_argument( "--skip-validation", @@ -410,19 +430,22 @@ def parse_args(): type=str, help=( "Directly set the encoding string for encodebin. " - "See GStreamer's GstEncodingProfiile page for examples. " + "See GStreamer's GstEncodingProfile page for examples. " "The examples on that page are included in the --encoding option. " "Only use this option if you have a custom encoding string." ), ) viewfinder_subparser = subparser.add_parser("show-viewfinder") + default_viewfinder_seconds = 10 viewfinder_subparser.add_argument( "-s", "--seconds", type=int, - help="Show the viewfinder for n seconds", - default=10, + help="Show the viewfinder for n seconds. Default = {}".format( + default_viewfinder_seconds + ), + default=default_viewfinder_seconds, ) player_subparser = subparser.add_parser("play-video") @@ -441,8 +464,8 @@ def elem_to_str(element: Gst.Element) -> str: """Prints an element to string - Excluding parent & client name - :param element: gstreamer element - :return: String representaion + :param element: GStreamer element + :return: String representation """ properties = element.list_properties() # list[GObject.GParamSpec] element_name = element.get_factory().get_name() @@ -816,7 +839,6 @@ def record_video( def main(): args = parse_args() - print(args) if os.getuid() == 0: logger.warning( From 0ae9f84321313578facce4d0089005d037554e13 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 26 Dec 2024 14:47:53 +0800 Subject: [PATCH 44/79] fix: incorrect pipeline start and stop --- .../base/bin/camera_test_auto_gst_source.py | 122 +++++------------- 1 file changed, 34 insertions(+), 88 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index cd720ee8f9..cff9932a92 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -2,6 +2,7 @@ from enum import Enum import os +import sys import PIL.Image import gi from argparse import ArgumentParser @@ -147,6 +148,8 @@ def validate_video_info( ) passed = False + if passed: + print("video validation pass!") return passed @@ -507,90 +510,32 @@ def elem_to_str(element: Gst.Element) -> str: def run_pipeline( pipeline: Gst.Pipeline, run_n_seconds: T.Optional[int] = None, - force_kill_timeout: int = 300, intermediate_calls: T.List[T.Tuple[int, VoidFn]] = [], + stop_on_error=True, ): - """Run the pipeline - - :param pipeline: Gst.Pipeline. All element creation/linking steps - should be done by this point - :param run_n_seconds: how long until we stop the main loop. - - If None, only wait for EOS. - :param force_kill_timeout: how long until a force kill is triggered. - - If None and run_n_seconds != None, then force_kill = run_n_seconds*2 - - If != None and run_n_seconds != None, an error is raised if - force kill <= run_n_seconds - :param intermediate_calls: a list of functions to call - while the pipeline is running. 
list[(() -> None, int)], where 2nd elem - is the number of seconds to wait RELATIVE to - when the pipeline started running - :raises RuntimeError: When set_state(PLAYING) fails - """ - bus = pipeline.get_bus() - assert bus - main_loop = GLib.MainLoop.new( # type: GLib.MainLoop - None, False # type: ignore - ) + loop = GLib.MainLoop() - # pipeline needs to start within 5 seconds - def start(): - pipeline.set_state(Gst.State.PLAYING) - # it's possible to hang here if the source is broken - # but the main thread will keep running, - # so we check both an explicit fail and a hang - source_state = pipeline.get_child_by_index(0).get_state(1 * 10**9)[0] - if source_state != Gst.StateChangeReturn.SUCCESS: - pipeline.set_state(Gst.State.NULL) - raise RuntimeError( - "Failed to transition to playing state. " - "Source is still in {} state after 1 second." - ) + def err_handler(_, msg: Gst.Message): + if msg.type == Gst.MessageType.ERROR: + logger.error("Got Gst Error: " + str(msg.parse_error()[0])) + if stop_on_error: + loop.quit() + pipeline.set_state(Gst.State.NULL) - def graceful_quit(): + def send_eos_and_wait(): logger.debug("Sending EOS.") - # Terminate gracefully with EOS. - # Directly setting it to null can cause videos to have timestamp issues - eos_handled = pipeline.send_event(Gst.Event.new_eos()) - - if not eos_handled: - logger.error("EOS was not handled by the pipeline. ") - pipeline.set_state(Gst.State.NULL) # force stop - main_loop.quit() - return - - if not force_kill_timeout and run_n_seconds: - bus_pop_timeout = run_n_seconds * 2 - else: - bus_pop_timeout = force_kill_timeout - - before_pop_t = time.time() - # it's possible to immediately pop None (got EOS, but message is None) - # so we also check if bus_pop_timeout has actually elapsed - eos_msg = bus.timed_pop_filtered(bus_pop_timeout, Gst.MessageType.EOS) - after_pop_t = time.time() - pipeline.set_state(Gst.State.NULL) - main_loop.quit() - - if eos_msg is None and after_pop_t - before_pop_t >= bus_pop_timeout: - # have to force system exit here, - # GLib.Mainloop overrides the sys.excepthook - raise SystemExit( - "Did not receive EOS after {} seconds. ".format( - bus_pop_timeout - ) - + "Pipeline likely hanged." 
- ) - - def quit(): - logger.debug("Setting state to NULL.") + pipeline.send_event(Gst.Event.new_eos()) + bus = pipeline.get_bus() + # this time is relative to when the EOS is sent + # we just wait a bit for EOS to appear + bus.timed_pop_filtered( + 3 * Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR + ) + loop.quit() pipeline.set_state(Gst.State.NULL) - main_loop.quit() - # Must explicitly unref if ref_count is somehow not 1, - # otherwise source is never released - # not sure why graceful_quit doesn't need this - if pipeline.ref_count > 1: - pipeline.unref() + if run_n_seconds: + GLib.timeout_add_seconds(run_n_seconds, send_eos_and_wait) for delay, call in intermediate_calls: assert run_n_seconds is None or delay < run_n_seconds, ( "Delay for each call must be smaller than total run seconds, " @@ -600,20 +545,21 @@ def quit(): ) GLib.timeout_add_seconds(delay, call) - if run_n_seconds is None: - bus.add_signal_watch() - bus.connect( - "message", - lambda _, msg: msg.type - in (Gst.MessageType.EOS, Gst.MessageType.ERROR) - and quit(), + b = pipeline.get_bus() + b.add_signal_watch() + b.connect("message", err_handler) + + pipeline.set_state(Gst.State.PLAYING) + source_state = pipeline.get_child_by_index(0).get_state(1 * 10**9)[0] + if source_state != Gst.StateChangeReturn.SUCCESS: + pipeline.set_state(Gst.State.NULL) + raise RuntimeError( + "Failed to transition to playing state. " + "Source is still in {} state after 1 second." ) - else: - GLib.timeout_add_seconds(run_n_seconds, graceful_quit) - start() logger.info("[ OK ] Pipeline is playing!") - main_loop.run() + loop.run() def play_video(filepath: str): From 181f7b30c687c96b0ab25544a84dc14dc19fe853 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 26 Dec 2024 16:00:42 +0800 Subject: [PATCH 45/79] fix: add import error check --- .../base/bin/camera_test_auto_gst_source.py | 26 +++++-------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index cff9932a92..2280fe91a2 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -2,13 +2,11 @@ from enum import Enum import os -import sys import PIL.Image import gi from argparse import ArgumentParser import typing as T import logging -import time VoidFn = T.Callable[[], None] # takes nothing and returns nothing @@ -148,8 +146,6 @@ def validate_video_info( ) passed = False - if passed: - print("video validation pass!") return passed @@ -511,17 +507,9 @@ def run_pipeline( pipeline: Gst.Pipeline, run_n_seconds: T.Optional[int] = None, intermediate_calls: T.List[T.Tuple[int, VoidFn]] = [], - stop_on_error=True, ): loop = GLib.MainLoop() - def err_handler(_, msg: Gst.Message): - if msg.type == Gst.MessageType.ERROR: - logger.error("Got Gst Error: " + str(msg.parse_error()[0])) - if stop_on_error: - loop.quit() - pipeline.set_state(Gst.State.NULL) - def send_eos_and_wait(): logger.debug("Sending EOS.") pipeline.send_event(Gst.Event.new_eos()) @@ -545,10 +533,6 @@ def send_eos_and_wait(): ) GLib.timeout_add_seconds(delay, call) - b = pipeline.get_bus() - b.add_signal_watch() - b.connect("message", err_handler) - pipeline.set_state(Gst.State.PLAYING) source_state = pipeline.get_child_by_index(0).get_state(1 * 10**9)[0] if source_state != Gst.StateChangeReturn.SUCCESS: @@ -599,10 +583,14 @@ def show_viewfinder( global Gtk if not Gtk: gi.require_version("Gtk", 
"3.0") - from gi.repository import Gtk as _Gtk # type: ignore + try: + from gi.repository import Gtk as _Gtk # type: ignore - Gtk = _Gtk - Gtk.init([]) + Gtk = _Gtk + Gtk.init([]) + except ImportError: + logger.error("Unable to import Gtk") + return partial_pipeline = " ! ".join(["videoconvert name=head", "autovideosink"]) pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline From 7edf6088e9d55c263ec8929e41888d54a2b32d7d Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 1 Jan 2025 22:24:07 +0800 Subject: [PATCH 46/79] style: cleanups --- .../base/bin/camera_test_auto_gst_source.py | 33 +++++++------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 2280fe91a2..70ba3b9367 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -10,11 +10,6 @@ VoidFn = T.Callable[[], None] # takes nothing and returns nothing -# https://github.com/TheImagingSource/tiscamera/blob/master/examples/python/00-list-devices.py - -# detect intrange -# http://gstreamer-devel.230.s1.nabble.com/gstreamer-python-binding-and-intRange-td969231.html#a969232 - logger = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s %(levelname)s - %(message)s\n", @@ -56,10 +51,7 @@ class MediaValidator: @staticmethod def validate_image_dimensions( - image_file_path: str, - *, - expected_width: int, - expected_height: int, + image_file_path: str, *, expected_width: int, expected_height: int ) -> bool: assert os.path.isfile( image_file_path @@ -92,7 +84,7 @@ def validate_video_info( expected_height: int, expected_duration_seconds: int, expected_fps: int, - duration_tolerance_seconds=0.1, + duration_tolerance_seconds=0.1 ) -> bool: discoverer = GstPbutils.Discoverer() @@ -155,7 +147,7 @@ class CapsResolver: INT32_MAX = 2147483647 # (top, bottom) or (numerator, denominator) - FractionTuple = tuple[int, int] + FractionTuple = T.Tuple[int, int] # Used when we encounter IntRange or FractionRange types # Simply fixating the caps will produce too many caps, # so we restrict to these common ones @@ -291,7 +283,7 @@ def get_all_fixated_caps( low, high = self.extract_int_range(s_i, prop) finite_list = GObject.ValueArray() for elem in self.remap_range_to_list(prop, low, high): - finite_list.append(elem) + finite_list.append(elem) # type: ignore elif s_i.has_field_typed(prop, Gst.FractionRange): low, high = self.extract_fraction_range(s_i, prop) @@ -305,8 +297,11 @@ def get_all_fixated_caps( "temp, {}={{{}}}".format( prop, ",".join( - "{}/{}".format(f[0], f[1]) - for f in fraction_list + "{}/{}".format(numerator, denominator) + for ( + numerator, + denominator, + ) in fraction_list ), ) )[0] @@ -568,11 +563,7 @@ def play_video(filepath: str): run_pipeline(pipeline) -def show_viewfinder( - source: Gst.Element, - *, - show_n_seconds=5, -): +def show_viewfinder(source: Gst.Element, *, show_n_seconds=5): """Shows a viewfinder for the given camera source :param source: camera source element. 
@@ -613,7 +604,7 @@ def take_photo( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - delay_seconds=0, + delay_seconds=0 ): """Take a photo using the source element @@ -697,7 +688,7 @@ def record_video( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - record_n_seconds=0, + record_n_seconds=0 ): assert file_path.endswith( ".mkv" From 01e35a499dd3375cd187acb6901f0572b623a76c Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 00:25:10 +0800 Subject: [PATCH 47/79] refactor: move core pipeline to checkbox support, fix segfault on early exit --- .../checkbox_support/camera_pipelines.py | 553 ++++++++++++++++++ .../base/bin/camera_test_auto_gst_source.py | 547 +---------------- 2 files changed, 574 insertions(+), 526 deletions(-) create mode 100644 checkbox-support/checkbox_support/camera_pipelines.py diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py new file mode 100644 index 0000000000..5e15515281 --- /dev/null +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -0,0 +1,553 @@ +from enum import Enum +import gi +import typing as T +import logging + + +logger = logging.getLogger(__name__) +logging.basicConfig( + format="%(asctime)s %(levelname)s - %(message)s\n", + datefmt="%m/%d %H:%M:%S", +) +logger.setLevel(logging.DEBUG) + +from gi.repository import GObject # type: ignore # noqa: E402 + +Gtk = None + +gi.require_version("Gst", "1.0") +from gi.repository import Gst # type: ignore # noqa: E402 + +gi.require_version("GLib", "2.0") +from gi.repository import GLib # type: ignore # noqa: E402 + +TimeoutCallback = T.Callable[[], None] + + +class CapsResolver: + + INT32_MIN = -2147483648 + INT32_MAX = 2147483647 + + # (top, bottom) or (numerator, denominator) + FractionTuple = T.Tuple[int, int] + # Used when we encounter IntRange or FractionRange types + # Simply fixating the caps will produce too many caps, + # so we restrict to these common ones + RangeResolveMethod = T.Literal["remap", "limit"] + RANGE_REMAP = { + "width": [640, 1280, 1920, 2560, 3840], + "height": [480, 720, 1080, 1440, 2160], + "framerate": [ + (15, 1), + (30, 1), + (60, 1), + (120, 1), + ], # 15fpx, 30fps, 60fps, 120fps + } + + def extract_fraction_range( + self, struct: Gst.Structure, prop_name: str + ) -> T.Tuple[FractionTuple, FractionTuple]: + """Extracts (low, high) fraction range from a Gst.Structure + + :param struct: structure whose prop_name is a Gst.FractionRange + :param prop_name: name of the property + :return: (low, high) fraction tuple + - NOTE: low is defined as having a smaller numerator + """ + assert struct.has_field_typed(prop_name, Gst.FractionRange) + low = struct.copy() # type: Gst.Structure + high = struct.copy() # type: Gst.Structure + low.fixate_field_nearest_fraction(prop_name, 0, 1) + high.fixate_field_nearest_fraction(prop_name, self.INT32_MAX, 1) + + return ( + low.get_fraction(prop_name)[1:], + high.get_fraction(prop_name)[1:], + ) + + def extract_int_range( + self, struct: Gst.Structure, prop_name: str + ) -> T.Tuple[int, int]: + """Bit of a hack to work around the missing Gst.IntRange type + + :param struct: structure whose prop_name property is a Gst.IntRange + :param prop_name: name of the property + :return: (low, high) integer tuple + """ + # the introspected class exists, but we can't construct it + assert struct.has_field_typed(prop_name, Gst.IntRange) + + low = struct.copy() # type: Gst.Structure + high = struct.copy() # type: 
Gst.Structure + low.fixate_field_nearest_int(prop_name, self.INT32_MIN) + high.fixate_field_nearest_int(prop_name, self.INT32_MAX) + + # get_int returns a (success, value) tuple + return low.get_int(prop_name)[1], high.get_int(prop_name)[1] + + @T.overload + def remap_range_to_list( + self, prop: str, low: int, high: int + ) -> T.List[int]: ... + + @T.overload + def remap_range_to_list( + self, prop: str, low: FractionTuple, high: FractionTuple + ) -> T.List[FractionTuple]: ... + + def remap_range_to_list( + self, + prop: str, + low: T.Union[int, FractionTuple], + high: T.Union[int, FractionTuple], + ) -> T.List: + """Creates a GObject.ValueArray based on range + that can be used in Gst.Caps + + :param low: min value, inclusive + :param high: max value, inclusive + :return: ValueArray object. Usage: Caps.set_property(prop, value_array) + """ + out = [] + assert ( + prop in self.RANGE_REMAP + ), "Property {} does not have a remap definition".format(prop) + + for val in self.RANGE_REMAP[prop]: + # lt gt are defined as pairwise comparison on tuples + if val >= low and val <= high: + out.append(val) + + return out + + @T.overload + def get_all_fixated_caps( + self, caps: Gst.Caps, resolve_method: T.Literal["remap"] + ): ... + @T.overload + def get_all_fixated_caps( + self, caps: Gst.Caps, resolve_method: T.Literal["limit"], limit: int + ): ... + def get_all_fixated_caps( + self, + caps: Gst.Caps, + resolve_method: RangeResolveMethod, + limit: T.Optional[int] = None, + ) -> T.List[Gst.Caps]: + """Gets all the fixated(1 value per property) caps from a Gst.Caps obj + + :param caps: a mixed Gst.Caps + :param resolve_method: how to resolve IntRange and FractionRange values + - Only applies to width, height, and framerate for now + - "remap" => picks out a set of common values within the original range + - "limit" => Use the caps.is_fixed while loop until we reaches limit + + :param limit: the limit to use for the "limit" resolver + - ignored if resolve_method != "limit" + :return: a list of fixed caps + """ + if caps.is_fixed(): + return [caps] + + fixed_caps = [] # type: list[Gst.Caps] + + for i in range(caps.get_size()): + struct = caps.get_structure(i) + caps_i = Gst.Caps.from_string(struct.to_string()) # type: Gst.Caps + + if resolve_method == "remap": + for prop in self.RANGE_REMAP.keys(): + s_i = caps_i.get_structure(0) # type: Gst.Structure + + finite_list = None # type: GObject.ValueArray | None + if s_i.has_field_typed(prop, Gst.IntRange): + low, high = self.extract_int_range(s_i, prop) + finite_list = GObject.ValueArray() + for elem in self.remap_range_to_list(prop, low, high): + finite_list.append(elem) # type: ignore + + elif s_i.has_field_typed(prop, Gst.FractionRange): + low, high = self.extract_fraction_range(s_i, prop) + fraction_list = self.remap_range_to_list( + prop, low, high + ) + # workaround missing Gst.Fraction + # we can't directly create fraction objects + # but we can create a struct from str, then access it + temp = Gst.Structure.from_string( + "temp, {}={{{}}}".format( + prop, + ",".join( + "{}/{}".format(numerator, denominator) + for ( + numerator, + denominator, + ) in fraction_list + ), + ) + )[0] + # creates a struct of the form: temp, prop={30/1, 15/1} + # now we simply get the prop by name + finite_list = temp.get_list(prop)[1] + + if finite_list is not None: + assert finite_list.n_values != 0 + s_i.set_list( + prop, + finite_list, + ) + + caps_i = Gst.Caps.from_string(s_i.to_string()) + + while not caps_i.is_fixed() and not caps_i.is_empty(): + if resolve_method 
== "limit": + assert limit + if len(fixed_caps) >= limit: + break + fixed_cap = caps_i.fixate() # type: Gst.Caps + fixed_caps.append(fixed_cap) + caps_i = caps_i.subtract(fixed_cap) + + if caps_i.is_fixed(): + fixed_caps.append(caps_i) + + return fixed_caps + + +def elem_to_str(element: Gst.Element) -> str: + """Prints an element to string + - Excluding parent & client name + + :param element: GStreamer element + :return: String representation + """ + properties = element.list_properties() # list[GObject.GParamSpec] + element_name = element.get_factory().get_name() + + exclude = ["parent", "client-name"] + prop_strings = [] # type: list[str] + + for prop in properties: + if prop.name in exclude: + continue + + try: + prop_value = element.get_property(prop.name) + except Exception: + logger.debug( + "Property {} is unreadable in {}, ignored.".format( + prop.name, element_name + ) # not every property is readable, ignore unreadable ones + ) + continue + + if hasattr(prop_value, "to_string") and callable(prop_value.to_string): + # sometimes we have a nice to_string method, prioritize this + prop_strings.append( + "{}={}".format(prop.name, prop_value.to_string()) + ) + elif isinstance(prop, Enum): + prop_strings.append("{}={}".format(prop.name, prop_value.value)) + else: + prop_strings.append( + "{}={}".format(prop.name, str(prop_value)) + ) # handle native python types + + return "{} {}".format( + element_name, " ".join(prop_strings) + ) # libcamerasrc name=cam_name location=p.jpeg + + +def run_pipeline( + pipeline: Gst.Pipeline, + run_n_seconds: T.Optional[int] = None, + intermediate_calls: T.List[T.Tuple[int, TimeoutCallback]] = [], +): + loop = GLib.MainLoop() + remaining_timeouts = set() # type: set[int] + print('curr pipeline id', id(pipeline)) + def gst_msg_handler(_, msg: Gst.Message): + if msg.type == Gst.MessageType.EOS: + logger.info("Received EOS") + loop.quit() + pipeline.set_state(Gst.State.NULL) + + if msg.type == Gst.MessageType.ERROR: + logger.error( + "Pipeline encountered an error, stopping. " + + str(Gst.Message.parse_error(msg)) + ) + loop.quit() + pipeline.set_state(Gst.State.NULL) + + for timeout in remaining_timeouts: + # if the pipeline is terminated early, remove all timers + # because loop.quit() won't remove those + # that are already scheduled + # this may produce warnings, but won't stop execution + GLib.source_remove(timeout) + + if msg.type == Gst.MessageType.WARNING: + logger.warning(Gst.Message.parse_warning(msg)) + + def send_eos(): + logger.debug("Sending EOS.") + pipeline.send_event(Gst.Event.new_eos()) + + if run_n_seconds: + remaining_timeouts.add( + GLib.timeout_add_seconds(run_n_seconds, send_eos) + ) + + for delay, call in intermediate_calls: + assert run_n_seconds is None or delay < run_n_seconds, ( + "Delay for each call must be smaller than total run seconds, " + " (Got delay = {}, run_n_seconds = {})".format( + delay, run_n_seconds + ) + ) + remaining_timeouts.add(GLib.timeout_add_seconds(delay, call)) + + bus = pipeline.get_bus() + bus.add_signal_watch() + bus.connect("message", gst_msg_handler) + + pipeline.set_state(Gst.State.PLAYING) + source_state = pipeline.get_child_by_index(0).get_state(1 * Gst.SECOND)[0] + if source_state != Gst.StateChangeReturn.SUCCESS: + pipeline.set_state(Gst.State.NULL) + raise RuntimeError( + "Failed to transition to playing state. 
" + "Source is still in {} state after 1 second.".format(source_state) + ) + + logger.info("[ OK ] Pipeline is playing!") + loop.run() + + +def play_video(filepath: str): + global Gtk + if not Gtk: + gi.require_version("Gtk", "3.0") + from gi.repository import Gtk as _Gtk # type: ignore + + Gtk = _Gtk + Gtk.init([]) + + pipeline = Gst.parse_launch( + " ! ".join( + [ + "filesrc location={}".format(filepath), + "decodebin", + "videoconvert", + "autovideosink", + ] + ) + ) + run_pipeline(pipeline) + + +def show_viewfinder(source: Gst.Element, *, show_n_seconds=5): + """Shows a viewfinder for the given camera source + + :param source: camera source element. + If there is any property that needs to be set, + do that before calling this function + :param show_n_seconds: number of seconds to keep the viewfinder on screen + """ + global Gtk + if not Gtk: + gi.require_version("Gtk", "3.0") + try: + from gi.repository import Gtk as _Gtk # type: ignore + + Gtk = _Gtk + Gtk.init([]) + except ImportError: + logger.error("Unable to import Gtk") + return + + partial_pipeline = " ! ".join(["videoconvert name=head", "autovideosink"]) + pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline + head = pipeline.get_by_name("head") + + assert pipeline.add(source) + assert head + assert source.link(head) + + logger.info( + "[ OK ] Created pipeline for viewfinder: {} ! {}".format( + elem_to_str(source), partial_pipeline + ) + ) + run_pipeline( + pipeline, + show_n_seconds, + ) + + +def take_photo( + source: Gst.Element, + *, + caps: T.Optional[Gst.Caps] = None, + file_path: str, + delay_seconds=0 +): + """Take a photo using the source element + + :param source: The camera source element + :param caps: Which capability to use for the source + - If None, no caps filter will be inserted between source and decoder + :param file_path: the path to the photo + :param delay_seconds: number of seconds to keep the source "open" + before taking the photo + """ + + # this may seem unorthodox + # but it's way less verbose than creating individual elements + str_elements = [ + 'capsfilter name=source-caps caps="{}"', # 0 + "decodebin", # 1 + "videoconvert name=converter", # 2 + "valve name=photo-valve drop=True", # 4 + "jpegenc", # 3 + "multifilesink location={}".format(file_path), # 5 + ] + head_elem_name = "source-caps" + + # using empty string as null values here + # they are filtered out at parse_launch + if caps: + assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) + + str_elements[0] = str_elements[0].format(caps.to_string()) + mime_type = caps.get_structure(0).get_name() # type: str + + if mime_type == "image/jpeg": + # decodebin has funny clock problem with live sources in image/jpeg + str_elements[1] = "jpegdec" + elif mime_type == "video/x-raw": + # don't need a decoder for raw + str_elements[1] = "" + # else case is using decodebin as a fallback + else: + # decode bin doesn't work with video/x-raw + str_elements[0] = str_elements[1] = str_elements[3] = "" + head_elem_name = "converter" + + partial = " ! 
".join(elem for elem in str_elements if elem) + pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline + head_elem = pipeline.get_by_name(head_elem_name) + valve = pipeline.get_by_name("photo-valve") + + # parse the partial pipeline, then get head element by name + assert pipeline.add( + source + ), "Could not add source element {} to the pipeline".format( + elem_to_str(source) + ) + assert head_elem and valve + assert source.link( + head_elem + ), "Could not link source element to {}".format(head_elem) + + def open_valve(): + logger.debug("Opening valve!") + valve.set_property("drop", False) + + logger.info( + "Created photo pipeline with {} second delay. ".format(delay_seconds) + + '"{} ! {}"'.format(elem_to_str(source), partial) + ) + logger.debug("Setting playing state") + + run_pipeline( + pipeline, + delay_seconds + 1, # workaround for now, weird problem with ref count + intermediate_calls=[ + (delay_seconds, open_valve), + ], + ) + + logger.info("[ OK ] Photo was saved to {}".format(file_path)) + + +def record_video( + source: Gst.Element, + *, + caps: T.Optional[Gst.Caps] = None, + file_path: str, + record_n_seconds=0 +): + assert file_path.endswith( + ".mkv" + ), "This function uses matroskamux, so the filename must end in .mkv" + + str_elements = [ + 'capsfilter name=source-caps caps="{}"', # 0 + "decodebin", # 1 + "videoconvert name=converter", # 2 + "jpegenc", # 3, avoid massiave uncompressed videos + "matroskamux", # 4 + "filesink location={}".format(file_path), # 5 + ] + + head_elem_name = "source-caps" + + if caps: + assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) + + str_elements[0] = str_elements[0].format(caps.to_string()) + mime_type = caps.get_structure(0).get_name() # type: str + + if mime_type == "image/jpeg": + str_elements[1] = "jpegdec" + elif mime_type == "video/x-raw": + str_elements[1] = "" + else: + # decodebin doesn't work with video/x-raw + str_elements[0] = str_elements[1] = "" + head_elem_name = "converter" + + partial = " ! ".join(elem for elem in str_elements if elem) + pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline + head_elem = pipeline.get_by_name(head_elem_name) + + # parse the partial pipeline, then get head element by name + assert pipeline.add( + source + ), "Could not add source element {} to the pipeline".format( + elem_to_str(source) + ) + assert head_elem + assert source.link( + head_elem + ), "Could not link source element to {}".format(head_elem) + + logger.info( + "[ OK ] Created video pipeline to record {} seconds".format( + record_n_seconds + ) + ) + logger.info("{} ! {}".format(elem_to_str(source), partial)) + logger.debug("Setting playing state") + + run_pipeline(pipeline, record_n_seconds) + + logger.info( + "[ OK ] Video for this capability: " + + "{}".format(caps.to_string() if caps else "[device default]") + + " was saved to {}".format(file_path) + ) + + """record + videotestsrc num-buffers=120 ! + queue ! + encodebin profile="video/quicktime,variant=iso:video/x-h264" ! + multifilesink location=video.mp4 + """ + """decode + filesrc location=video.mp4 ! decodebin ! autovideosink + """ diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 70ba3b9367..139ff4d417 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -1,14 +1,12 @@ #! 
/usr/bin/python3 -from enum import Enum import os import PIL.Image import gi from argparse import ArgumentParser import typing as T import logging - -VoidFn = T.Callable[[], None] # takes nothing and returns nothing +from checkbox_support import camera_pipelines as cam logger = logging.getLogger(__name__) logging.basicConfig( @@ -17,8 +15,6 @@ ) logger.setLevel(logging.DEBUG) -from gi.repository import GObject # type: ignore # noqa: E402 - Gtk = None gi.require_version("Gst", "1.0") @@ -26,25 +22,6 @@ from gi.repository import Gst, GstPbutils # type: ignore # noqa: E402 gi.require_version("GLib", "2.0") -from gi.repository import GLib # type: ignore # noqa: E402 - - -ENCODING_PROFILES = { - "mp4_h264": "video/quicktime,variant=iso:video/x-h264", - "ogv_theora": "application/ogg:video/x-theora", - "webm_vp8": "video/webm:video/x-vp8", -} - - -def get_devices() -> T.List[Gst.Device]: - monitor = Gst.DeviceMonitor.new() # type: Gst.DeviceMonitor - monitor.add_filter("Video/Source") - monitor.start() - - devices = monitor.get_devices() - - monitor.stop() - return devices class MediaValidator: @@ -141,196 +118,22 @@ def validate_video_info( return passed -class CapsResolver: - - INT32_MIN = -2147483648 - INT32_MAX = 2147483647 - - # (top, bottom) or (numerator, denominator) - FractionTuple = T.Tuple[int, int] - # Used when we encounter IntRange or FractionRange types - # Simply fixating the caps will produce too many caps, - # so we restrict to these common ones - RangeResolveMethod = T.Literal["remap", "limit"] - RANGE_REMAP = { - "width": [640, 1280, 1920, 2560, 3840], - "height": [480, 720, 1080, 1440, 2160], - "framerate": [ - (15, 1), - (30, 1), - (60, 1), - (120, 1), - ], # 15fpx, 30fps, 60fps, 120fps - } - - def extract_fraction_range( - self, struct: Gst.Structure, prop_name: str - ) -> T.Tuple[FractionTuple, FractionTuple]: - """Extracts (low, high) fraction range from a Gst.Structure - - :param struct: structure whose prop_name is a Gst.FractionRange - :param prop_name: name of the property - :return: (low, high) fraction tuple - - NOTE: low is defined as having a smaller numerator - """ - assert struct.has_field_typed(prop_name, Gst.FractionRange) - low = struct.copy() # type: Gst.Structure - high = struct.copy() # type: Gst.Structure - low.fixate_field_nearest_fraction(prop_name, 0, 1) - high.fixate_field_nearest_fraction(prop_name, self.INT32_MAX, 1) - - return ( - low.get_fraction(prop_name)[1:], - high.get_fraction(prop_name)[1:], - ) +ENCODING_PROFILES = { + "mp4_h264": "video/quicktime,variant=iso:video/x-h264", + "ogv_theora": "application/ogg:video/x-theora", + "webm_vp8": "video/webm:video/x-vp8", +} + - def extract_int_range( - self, struct: Gst.Structure, prop_name: str - ) -> T.Tuple[int, int]: - """Bit of a hack to work around the missing Gst.IntRange type - - :param struct: structure whose prop_name property is a Gst.IntRange - :param prop_name: name of the property - :return: (low, high) integer tuple - """ - # the introspected class exists, but we can't construct it - assert struct.has_field_typed(prop_name, Gst.IntRange) - - low = struct.copy() # type: Gst.Structure - high = struct.copy() # type: Gst.Structure - low.fixate_field_nearest_int(prop_name, self.INT32_MIN) - high.fixate_field_nearest_int(prop_name, self.INT32_MAX) - - # get_int returns a (success, value) tuple - return low.get_int(prop_name)[1], high.get_int(prop_name)[1] - - @T.overload - def remap_range_to_list( - self, prop: str, low: int, high: int - ) -> T.List[int]: ... 
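For reference, the range-extraction helpers being relocated above rely on a fixation trick rather than a real Gst.IntRange constructor; a minimal standalone sketch of the same idea, assuming only the GStreamer 1.0 Python bindings (the caps string and the "width" property are illustrative):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

# a structure whose "width" field is an IntRange, like the ones a camera reports
struct = Gst.Caps.from_string("video/x-raw, width=[ 320, 1920 ]").get_structure(0)

low, high = struct.copy(), struct.copy()
low.fixate_field_nearest_int("width", -2147483648)   # snaps to the range minimum
high.fixate_field_nearest_int("width", 2147483647)   # snaps to the range maximum

# get_int returns a (success, value) tuple
print(low.get_int("width")[1], high.get_int("width")[1])  # -> 320 1920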
- - @T.overload - def remap_range_to_list( - self, prop: str, low: FractionTuple, high: FractionTuple - ) -> T.List[FractionTuple]: ... - - def remap_range_to_list( - self, - prop: str, - low: T.Union[int, FractionTuple], - high: T.Union[int, FractionTuple], - ) -> T.List: - """Creates a GObject.ValueArray based on range - that can be used in Gst.Caps - - :param low: min value, inclusive - :param high: max value, inclusive - :return: ValueArray object. Usage: Caps.set_property(prop, value_array) - """ - out = [] - assert ( - prop in self.RANGE_REMAP - ), "Property {} does not have a remap definition".format(prop) - - for val in self.RANGE_REMAP[prop]: - # lt gt are defined as pairwise comparison on tuples - if val >= low and val <= high: - out.append(val) - - return out - - @T.overload - def get_all_fixated_caps( - self, caps: Gst.Caps, resolve_method: T.Literal["remap"] - ): ... - @T.overload - def get_all_fixated_caps( - self, caps: Gst.Caps, resolve_method: T.Literal["limit"], limit: int - ): ... - def get_all_fixated_caps( - self, - caps: Gst.Caps, - resolve_method: RangeResolveMethod, - limit: T.Optional[int] = None, - ) -> T.List[Gst.Caps]: - """Gets all the fixated(1 value per property) caps from a Gst.Caps obj - - :param caps: a mixed Gst.Caps - :param resolve_method: how to resolve IntRange and FractionRange values - - Only applies to width, height, and framerate for now - - "remap" => picks out a set of common values within the original range - - "limit" => Use the caps.is_fixed while loop until we reaches limit - - :param limit: the limit to use for the "limit" resolver - - ignored if resolve_method != "limit" - :return: a list of fixed caps - """ - if caps.is_fixed(): - return [caps] - - fixed_caps = [] # type: list[Gst.Caps] - - for i in range(caps.get_size()): - struct = caps.get_structure(i) - caps_i = Gst.Caps.from_string(struct.to_string()) # type: Gst.Caps - - if resolve_method == "remap": - for prop in self.RANGE_REMAP.keys(): - s_i = caps_i.get_structure(0) # type: Gst.Structure - - finite_list = None # type: GObject.ValueArray | None - if s_i.has_field_typed(prop, Gst.IntRange): - low, high = self.extract_int_range(s_i, prop) - finite_list = GObject.ValueArray() - for elem in self.remap_range_to_list(prop, low, high): - finite_list.append(elem) # type: ignore - - elif s_i.has_field_typed(prop, Gst.FractionRange): - low, high = self.extract_fraction_range(s_i, prop) - fraction_list = self.remap_range_to_list( - prop, low, high - ) - # workaround missing Gst.Fraction - # we can't directly create fraction objects - # but we can create a struct from str, then access it - temp = Gst.Structure.from_string( - "temp, {}={{{}}}".format( - prop, - ",".join( - "{}/{}".format(numerator, denominator) - for ( - numerator, - denominator, - ) in fraction_list - ), - ) - )[0] - # creates a struct of the form: temp, prop={30/1, 15/1} - # now we simply get the prop by name - finite_list = temp.get_list(prop)[1] - - if finite_list is not None: - assert finite_list.n_values != 0 - s_i.set_list( - prop, - finite_list, - ) - - caps_i = Gst.Caps.from_string(s_i.to_string()) - - while not caps_i.is_fixed() and not caps_i.is_empty(): - if resolve_method == "limit": - assert limit - if len(fixed_caps) >= limit: - break - fixed_cap = caps_i.fixate() # type: Gst.Caps - fixed_caps.append(fixed_cap) - caps_i = caps_i.subtract(fixed_cap) - - if caps_i.is_fixed(): - fixed_caps.append(caps_i) - - return fixed_caps +def get_devices() -> T.List[Gst.Device]: + monitor = Gst.DeviceMonitor.new() # type: 
Gst.DeviceMonitor + monitor.add_filter("Video/Source") + monitor.start() + + devices = monitor.get_devices() + + monitor.stop() + return devices def parse_args(): @@ -454,314 +257,6 @@ def parse_args(): return parser.parse_args() -def elem_to_str(element: Gst.Element) -> str: - """Prints an element to string - - Excluding parent & client name - - :param element: GStreamer element - :return: String representation - """ - properties = element.list_properties() # list[GObject.GParamSpec] - element_name = element.get_factory().get_name() - - exclude = ["parent", "client-name"] - prop_strings = [] # type: list[str] - - for prop in properties: - if prop.name in exclude: - continue - - try: - prop_value = element.get_property(prop.name) - except Exception: - logger.debug( - "Property {} is unreadable in {}, ignored.".format( - prop.name, element_name - ) # not every property is readable, ignore unreadable ones - ) - continue - - if hasattr(prop_value, "to_string") and callable(prop_value.to_string): - # sometimes we have a nice to_string method, prioritize this - prop_strings.append( - "{}={}".format(prop.name, prop_value.to_string()) - ) - elif isinstance(prop, Enum): - prop_strings.append("{}={}".format(prop.name, prop_value.value)) - else: - prop_strings.append( - "{}={}".format(prop.name, str(prop_value)) - ) # handle native python types - - return "{} {}".format( - element_name, " ".join(prop_strings) - ) # libcamerasrc name=cam_name location=p.jpeg - - -def run_pipeline( - pipeline: Gst.Pipeline, - run_n_seconds: T.Optional[int] = None, - intermediate_calls: T.List[T.Tuple[int, VoidFn]] = [], -): - loop = GLib.MainLoop() - - def send_eos_and_wait(): - logger.debug("Sending EOS.") - pipeline.send_event(Gst.Event.new_eos()) - bus = pipeline.get_bus() - # this time is relative to when the EOS is sent - # we just wait a bit for EOS to appear - bus.timed_pop_filtered( - 3 * Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR - ) - loop.quit() - pipeline.set_state(Gst.State.NULL) - - if run_n_seconds: - GLib.timeout_add_seconds(run_n_seconds, send_eos_and_wait) - for delay, call in intermediate_calls: - assert run_n_seconds is None or delay < run_n_seconds, ( - "Delay for each call must be smaller than total run seconds, " - " (Got delay = {}, run_n_seconds = {})".format( - delay, run_n_seconds - ) - ) - GLib.timeout_add_seconds(delay, call) - - pipeline.set_state(Gst.State.PLAYING) - source_state = pipeline.get_child_by_index(0).get_state(1 * 10**9)[0] - if source_state != Gst.StateChangeReturn.SUCCESS: - pipeline.set_state(Gst.State.NULL) - raise RuntimeError( - "Failed to transition to playing state. " - "Source is still in {} state after 1 second." - ) - - logger.info("[ OK ] Pipeline is playing!") - loop.run() - - -def play_video(filepath: str): - global Gtk - if not Gtk: - gi.require_version("Gtk", "3.0") - from gi.repository import Gtk as _Gtk # type: ignore - - Gtk = _Gtk - Gtk.init([]) - - pipeline = Gst.parse_launch( - " ! ".join( - [ - "filesrc location={}".format(filepath), - "decodebin", - "videoconvert", - "autovideosink", - ] - ) - ) - run_pipeline(pipeline) - - -def show_viewfinder(source: Gst.Element, *, show_n_seconds=5): - """Shows a viewfinder for the given camera source - - :param source: camera source element. 
- If there is any property that needs to be set, - do that before calling this function - :param show_n_seconds: number of seconds to keep the viewfinder on screen - """ - global Gtk - if not Gtk: - gi.require_version("Gtk", "3.0") - try: - from gi.repository import Gtk as _Gtk # type: ignore - - Gtk = _Gtk - Gtk.init([]) - except ImportError: - logger.error("Unable to import Gtk") - return - - partial_pipeline = " ! ".join(["videoconvert name=head", "autovideosink"]) - pipeline = Gst.parse_launch(partial_pipeline) # type: Gst.Pipeline - head = pipeline.get_by_name("head") - - assert pipeline.add(source) - assert head - assert source.link(head) - - logger.info( - "[ OK ] Created pipeline for viewfinder: {} ! {}".format( - elem_to_str(source), partial_pipeline - ) - ) - run_pipeline(pipeline, show_n_seconds) - - -def take_photo( - source: Gst.Element, - *, - caps: T.Optional[Gst.Caps] = None, - file_path: str, - delay_seconds=0 -): - """Take a photo using the source element - - :param source: The camera source element - :param caps: Which capability to use for the source - - If None, no caps filter will be inserted between source and decoder - :param file_path: the path to the photo - :param delay_seconds: number of seconds to keep the source "open" - before taking the photo - """ - - # this may seem unorthodox - # but it's way less verbose than creating individual elements - str_elements = [ - 'capsfilter name=source-caps caps="{}"', # 0 - "decodebin", # 1 - "videoconvert name=converter", # 2 - "valve name=photo-valve drop=True", # 4 - "jpegenc", # 3 - "multifilesink location={}".format(file_path), # 5 - ] - head_elem_name = "source-caps" - - # using empty string as null values here - # they are filtered out at parse_launch - if caps: - assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) - - str_elements[0] = str_elements[0].format(caps.to_string()) - mime_type = caps.get_structure(0).get_name() # type: str - - if mime_type == "image/jpeg": - # decodebin has funny clock problem with live sources in image/jpeg - str_elements[1] = "jpegdec" - elif mime_type == "video/x-raw": - # don't need a decoder for raw - str_elements[1] = "" - # else case is using decodebin as a fallback - else: - # decode bin doesn't work with video/x-raw - str_elements[0] = str_elements[1] = str_elements[3] = "" - head_elem_name = "converter" - - partial = " ! ".join(elem for elem in str_elements if elem) - pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline - head_elem = pipeline.get_by_name(head_elem_name) - valve = pipeline.get_by_name("photo-valve") - - # parse the partial pipeline, then get head element by name - assert pipeline.add( - source - ), "Could not add source element {} to the pipeline".format( - elem_to_str(source) - ) - assert head_elem and valve - assert source.link( - head_elem - ), "Could not link source element to {}".format(head_elem) - - def open_valve(): - logger.debug("Opening valve!") - valve.set_property("drop", False) - - logger.info( - "Created photo pipeline with {} second delay. ".format(delay_seconds) - + '"{} ! 
{}"'.format(elem_to_str(source), partial) - ) - logger.debug("Setting playing state") - - run_pipeline( - pipeline, - delay_seconds + 1, # workaround for now, weird problem with ref count - intermediate_calls=[(delay_seconds, open_valve)], - ) - - logger.info("[ OK ] Photo was saved to {}".format(file_path)) - - -def record_video( - source: Gst.Element, - *, - caps: T.Optional[Gst.Caps] = None, - file_path: str, - record_n_seconds=0 -): - assert file_path.endswith( - ".mkv" - ), "This function uses matroskamux, so the filename must end in .mkv" - - str_elements = [ - 'capsfilter name=source-caps caps="{}"', # 0 - "decodebin", # 1 - "videoconvert name=converter", # 2 - "jpegenc", # 3, avoid massiave uncompressed videos - "matroskamux", # 4 - "filesink location={}".format(file_path), # 5 - ] - - head_elem_name = "source-caps" - - if caps: - assert caps.is_fixed(), '"{}" is not fixed.'.format(caps.to_string()) - - str_elements[0] = str_elements[0].format(caps.to_string()) - mime_type = caps.get_structure(0).get_name() # type: str - - if mime_type == "image/jpeg": - str_elements[1] = "jpegdec" - elif mime_type == "video/x-raw": - str_elements[1] = "" - else: - # decodebin doesn't work with video/x-raw - str_elements[0] = str_elements[1] = "" - head_elem_name = "converter" - - partial = " ! ".join(elem for elem in str_elements if elem) - pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline - head_elem = pipeline.get_by_name(head_elem_name) - - # parse the partial pipeline, then get head element by name - assert pipeline.add( - source - ), "Could not add source element {} to the pipeline".format( - elem_to_str(source) - ) - assert head_elem - assert source.link( - head_elem - ), "Could not link source element to {}".format(head_elem) - - logger.info( - "[ OK ] Created video pipeline to record {} seconds".format( - record_n_seconds - ) - ) - logger.info("{} ! {}".format(elem_to_str(source), partial)) - logger.debug("Setting playing state") - - run_pipeline(pipeline, record_n_seconds) - - logger.info( - "[ OK ] Video for this capability: " - + "{}".format(caps.to_string() if caps else "[device default]") - + " was saved to {}".format(file_path) - ) - - """record - videotestsrc num-buffers=120 ! - queue ! - encodebin profile="video/quicktime,variant=iso:video/x-h264" ! - multifilesink location=video.mp4 - """ - """decode - filesrc location=video.mp4 ! decodebin ! 
autovideosink - """ - - def main(): args = parse_args() @@ -772,7 +267,7 @@ def main(): ) if args.subcommand == "play-video": - play_video(args.path) + cam.play_video(args.path) return devices = get_devices() @@ -797,7 +292,7 @@ def main(): dev_element = device.create_element() if args.subcommand == "show-viewfinder": - show_viewfinder(dev_element, show_n_seconds=args.seconds) + cam.show_viewfinder(dev_element, show_n_seconds=args.seconds) continue if not os.path.isdir(args.path): @@ -807,7 +302,7 @@ def main(): 'Path "{}" does not exist'.format(args.path) ) - resolver = CapsResolver() + resolver = cam.CapsResolver() all_fixed_caps = resolver.get_all_fixated_caps( device.get_caps(), "remap" ) @@ -831,7 +326,7 @@ def main(): file_path = "{}/photo_dev_{}_cap_{}.jpeg".format( args.path, dev_i, cap_i ) - take_photo( + cam.take_photo( dev_element, delay_seconds=args.seconds, caps=capability, @@ -850,7 +345,7 @@ def main(): file_path = "{}/video_dev_{}_cap_{}.mkv".format( args.path, dev_i, cap_i ) - record_video( + cam.record_video( dev_element, file_path=file_path, caps=capability, From a6e94ed39b1e53a9fd97f13db56fb8b472cfa215 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 00:26:05 +0800 Subject: [PATCH 48/79] style: remove print --- checkbox-support/checkbox_support/camera_pipelines.py | 1 - 1 file changed, 1 deletion(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 5e15515281..d87d17f185 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -267,7 +267,6 @@ def run_pipeline( ): loop = GLib.MainLoop() remaining_timeouts = set() # type: set[int] - print('curr pipeline id', id(pipeline)) def gst_msg_handler(_, msg: Gst.Message): if msg.type == Gst.MessageType.EOS: logger.info("Received EOS") From 543cd3287529d646accdb5e45154fcad28c5cf14 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 11:48:24 +0800 Subject: [PATCH 49/79] fix: always use an upper limit for caps resolver --- .../checkbox_support/camera_pipelines.py | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index d87d17f185..47555f81a6 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -122,19 +122,11 @@ def remap_range_to_list( return out - @T.overload - def get_all_fixated_caps( - self, caps: Gst.Caps, resolve_method: T.Literal["remap"] - ): ... - @T.overload - def get_all_fixated_caps( - self, caps: Gst.Caps, resolve_method: T.Literal["limit"], limit: int - ): ... 
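With the overload stubs dropped, every caller goes through the single signature that follows and the limit bounds both resolve methods; a minimal usage sketch, assuming checkbox_support is importable and GStreamer 1.0 is initialized (the caps string mirrors the unit tests added later in this series; a later patch also renames "remap" to "known_values"):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
from checkbox_support.camera_pipelines import CapsResolver

Gst.init(None)

mixed = Gst.Caps.from_string(
    "image/jpeg, width=[1, 1280], height=[1, 720], "
    "framerate=[ (fraction)1/1, (fraction)100/1 ]"
)
resolver = CapsResolver()
# "remap" snaps each range onto the table of common values,
# while the limit still caps how many fixed caps are returned
for cap in resolver.get_all_fixated_caps(mixed, "remap", limit=32):
    print(cap.to_string())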
def get_all_fixated_caps( self, caps: Gst.Caps, resolve_method: RangeResolveMethod, - limit: T.Optional[int] = None, + limit: int = 10_000, ) -> T.List[Gst.Caps]: """Gets all the fixated(1 value per property) caps from a Gst.Caps obj @@ -145,7 +137,7 @@ def get_all_fixated_caps( - "limit" => Use the caps.is_fixed while loop until we reaches limit :param limit: the limit to use for the "limit" resolver - - ignored if resolve_method != "limit" + - if resolve method is remap, this is still in effect :return: a list of fixed caps """ if caps.is_fixed(): @@ -193,7 +185,13 @@ def get_all_fixated_caps( finite_list = temp.get_list(prop)[1] if finite_list is not None: - assert finite_list.n_values != 0 + if finite_list.n_values == 0: + print( + "Resolve method is remap," + "but original caps doesn't have any", + "of the common values.", + "Skipping.", + ) s_i.set_list( prop, finite_list, @@ -202,11 +200,12 @@ def get_all_fixated_caps( caps_i = Gst.Caps.from_string(s_i.to_string()) while not caps_i.is_fixed() and not caps_i.is_empty(): - if resolve_method == "limit": - assert limit - if len(fixed_caps) >= limit: - break + if len(fixed_caps) >= limit: + break fixed_cap = caps_i.fixate() # type: Gst.Caps + if len(fixed_caps) != 0 and fixed_cap.is_equal(fixed_caps[-1]): + # if the caps is already seen + break fixed_caps.append(fixed_cap) caps_i = caps_i.subtract(fixed_cap) @@ -488,7 +487,7 @@ def record_video( 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 "videoconvert name=converter", # 2 - "jpegenc", # 3, avoid massiave uncompressed videos + "jpegenc", # 3, avoid massive uncompressed videos "matroskamux", # 4 "filesink location={}".format(file_path), # 5 ] From 340005c564290477c595a9029686241c55160046 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 11:48:34 +0800 Subject: [PATCH 50/79] test: caps resolver tests --- .../tests/test_camera_pipelines.py | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 checkbox-support/checkbox_support/tests/test_camera_pipelines.py diff --git a/checkbox-support/checkbox_support/tests/test_camera_pipelines.py b/checkbox-support/checkbox_support/tests/test_camera_pipelines.py new file mode 100644 index 0000000000..f80b356dae --- /dev/null +++ b/checkbox-support/checkbox_support/tests/test_camera_pipelines.py @@ -0,0 +1,101 @@ +import itertools +from unittest.mock import MagicMock, patch + +import unittest as ut +import sys +import gi +from checkbox_support.camera_pipelines import CapsResolver + +gi.require_version("Gst", "1.0") +from gi.repository import Gst + + +class CapsResolverTests(ut.TestCase): + + resolver = None + + @classmethod + def setUpClass(cls): + Gst.init([]) + cls.resolver = CapsResolver() + + def test_fraction_list(self): + out = self.resolver.get_all_fixated_caps( + Gst.Caps.from_string( + "image/jpeg, width=1280, height=720, framerate={ (fraction)30/1, (fraction)15/1 }" + ), + "remap", + ) + + self.assertEqual( + [c.to_string() for c in out], + [ + "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)30/1", + "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)15/1", + ], + ) + + def test_fraction_range(self): + out = self.resolver.get_all_fixated_caps( + Gst.Caps.from_string( + "image/jpeg, width=1280, height=720, framerate=[ (fraction)1/1, (fraction)100/1 ]" + ), + "remap", + ) + + self.assertCountEqual( # quality without order + [cap.to_string() for cap in out], + [ + "image/jpeg, width=(int)1280, height=(int)720, 
framerate=(fraction)15/1", + "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)30/1", + "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)60/1", + ], + ) + + def test_int_range(self): + out = self.resolver.get_all_fixated_caps( + Gst.Caps.from_string( + "image/jpeg, width=[1, 1280], height=[1, 720], framerate=[ (fraction)1/1, (fraction)100/1 ]" + ), + "remap", + ) + answer = [ + "image/jpeg, width=(int){}, height=(int){}, framerate=(fraction){}".format( + width, height, framerate + ) + for width, height, framerate in itertools.product( + (640, 1280), (480, 720), ("15/1", "30/1", "60/1") + ) + ] + self.assertCountEqual( # quality without order + [cap.to_string() for cap in out], + answer, + ) + + def test_all_lists(self): + widths = ["20", "30", "40"] + heights = ["10", "720"] + framerates = ["15/1", "30/1", "60/1"] + answer = [ + "image/jpeg, width=(int){}, height=(int){}, framerate=(fraction){}".format( + width, height, framerate + ) + for width, height, framerate in itertools.product( + widths, heights, framerates + ) + ] + out = self.resolver.get_all_fixated_caps( + Gst.Caps.from_string( + "image/jpeg, width={{{}}}, height={{{}}}, framerate={{{}}}".format( + ", ".join(widths), + ", ".join(heights), + ", ".join(framerates), + ) + ), + "remap", + ) + self.assertCountEqual([cap.to_string() for cap in out], answer) + + +if __name__ == "__main__": + ut.main() From 824a854bf0ca13d2aff5906d3f759889c0c87a63 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 12:25:13 +0800 Subject: [PATCH 51/79] fix: typing.Literal doesn't exist in 3.5 --- checkbox-support/checkbox_support/camera_pipelines.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 47555f81a6..51edab2560 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -31,10 +31,6 @@ class CapsResolver: # (top, bottom) or (numerator, denominator) FractionTuple = T.Tuple[int, int] - # Used when we encounter IntRange or FractionRange types - # Simply fixating the caps will produce too many caps, - # so we restrict to these common ones - RangeResolveMethod = T.Literal["remap", "limit"] RANGE_REMAP = { "width": [640, 1280, 1920, 2560, 3840], "height": [480, 720, 1080, 1440, 2160], @@ -125,7 +121,7 @@ def remap_range_to_list( def get_all_fixated_caps( self, caps: Gst.Caps, - resolve_method: RangeResolveMethod, + resolve_method: str, # type T.Literal["remap", "limit"] limit: int = 10_000, ) -> T.List[Gst.Caps]: """Gets all the fixated(1 value per property) caps from a Gst.Caps obj @@ -266,6 +262,7 @@ def run_pipeline( ): loop = GLib.MainLoop() remaining_timeouts = set() # type: set[int] + def gst_msg_handler(_, msg: Gst.Message): if msg.type == Gst.MessageType.EOS: logger.info("Received EOS") @@ -391,7 +388,7 @@ def take_photo( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - delay_seconds=0 + delay_seconds=0, ): """Take a photo using the source element @@ -477,7 +474,7 @@ def record_video( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - record_n_seconds=0 + record_n_seconds=0, ): assert file_path.endswith( ".mkv" From 0582a49c5c8ee425b450cfb256ef68b4e5a8801d Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 12:30:02 +0800 Subject: [PATCH 52/79] fix: formatter 
returned incompatible syntax --- checkbox-support/checkbox_support/camera_pipelines.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 51edab2560..b15265c79a 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -122,7 +122,7 @@ def get_all_fixated_caps( self, caps: Gst.Caps, resolve_method: str, # type T.Literal["remap", "limit"] - limit: int = 10_000, + limit: int = 10000, ) -> T.List[Gst.Caps]: """Gets all the fixated(1 value per property) caps from a Gst.Caps obj @@ -388,7 +388,7 @@ def take_photo( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - delay_seconds=0, + delay_seconds=0 ): """Take a photo using the source element @@ -474,7 +474,7 @@ def record_video( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - record_n_seconds=0, + record_n_seconds=0 ): assert file_path.endswith( ".mkv" From 88a00001d9c8dfe93e060c7feb1d73fb9a3a004d Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 14:08:03 +0800 Subject: [PATCH 53/79] fix: handle x-bayer --- checkbox-support/checkbox_support/camera_pipelines.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index b15265c79a..6c368580af 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -388,7 +388,7 @@ def take_photo( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - delay_seconds=0 + delay_seconds: int ): """Take a photo using the source element @@ -426,6 +426,8 @@ def take_photo( elif mime_type == "video/x-raw": # don't need a decoder for raw str_elements[1] = "" + elif mime_type == "video/x-bayer": + str_elements[1] = "bayer2rgb" # else case is using decodebin as a fallback else: # decode bin doesn't work with video/x-raw @@ -501,6 +503,10 @@ def record_video( str_elements[1] = "jpegdec" elif mime_type == "video/x-raw": str_elements[1] = "" + elif mime_type == "video/x-bayer": + # bayer2rgb is not considered a decoder + # so decodebin can't automatically find this + str_elements[1] = "bayer2rgb" else: # decodebin doesn't work with video/x-raw str_elements[0] = str_elements[1] = "" From ba3b4432106480a1a01cde41523b7727a0bb77d5 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 17:23:28 +0800 Subject: [PATCH 54/79] fix: dangling parent object, support encoding choice --- .../checkbox_support/camera_pipelines.py | 17 +++++++---------- .../base/bin/camera_test_auto_gst_source.py | 17 ++++++++++++++--- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 6c368580af..1aa888c8ff 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -221,7 +221,7 @@ def elem_to_str(element: Gst.Element) -> str: properties = element.list_properties() # list[GObject.GParamSpec] element_name = element.get_factory().get_name() - exclude = ["parent", "client-name"] + exclude = ["client-name"] prop_strings = [] # type: list[str] for prop in properties: @@ -280,8 +280,9 @@ def gst_msg_handler(_, msg: Gst.Message): 
for timeout in remaining_timeouts: # if the pipeline is terminated early, remove all timers # because loop.quit() won't remove those - # that are already scheduled - # this may produce warnings, but won't stop execution + # that are already scheduled => segfault (EOS on null pipeline) + # calling source_remove may produce warnings, + # but won't stop normal execution GLib.source_remove(timeout) if msg.type == Gst.MessageType.WARNING: @@ -476,18 +477,14 @@ def record_video( *, caps: T.Optional[Gst.Caps] = None, file_path: str, - record_n_seconds=0 + record_n_seconds: int, + encoding_profile: str ): - assert file_path.endswith( - ".mkv" - ), "This function uses matroskamux, so the filename must end in .mkv" - str_elements = [ 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 "videoconvert name=converter", # 2 - "jpegenc", # 3, avoid massive uncompressed videos - "matroskamux", # 4 + "encodebin profile={}".format(encoding_profile), "filesink location={}".format(file_path), # 5 ] diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 139ff4d417..4cc8a3b9c0 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -211,7 +211,9 @@ def parse_args(): action="store_true", help="Skip video dimension & duration validation", ) - encoding_group = video_subparser.add_mutually_exclusive_group() + encoding_group = video_subparser.add_mutually_exclusive_group( + required=True + ) encoding_group.add_argument( "--encoding", type=str, @@ -289,7 +291,7 @@ def main(): ) for dev_i, device in enumerate(devices): - dev_element = device.create_element() + dev_element = device.create_element() # type: Gst.Element if args.subcommand == "show-viewfinder": cam.show_viewfinder(dev_element, show_n_seconds=args.seconds) @@ -315,6 +317,10 @@ def main(): ) for cap_i, capability in enumerate(all_fixed_caps): + # since we use the same element for all caps + # previous parent pipelines are not auto removed + # need to explicitly unref + dev_element.unparent() cap_struct = capability.get_structure(0) if args.subcommand == "take-photo": logger.info( @@ -342,7 +348,7 @@ def main(): expected_height=cap_struct.get_int("height").value, ) elif args.subcommand == "record-video": - file_path = "{}/video_dev_{}_cap_{}.mkv".format( + file_path = "{}/video_dev_{}_cap_{}.mp4".format( args.path, dev_i, cap_i ) cam.record_video( @@ -350,6 +356,11 @@ def main(): file_path=file_path, caps=capability, record_n_seconds=args.seconds, + encoding_profile=( + ENCODING_PROFILES[args.encoding] + if hasattr(args, "encoding") + else args.custom_encoding_string + ), ) if args.skip_validation: From 2e33e0890d9c4b2d6e6c08bcfe55f1420163b7c0 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 23:15:16 +0800 Subject: [PATCH 55/79] fix: no more remove source warnings --- .../checkbox_support/camera_pipelines.py | 70 +++++++++++++------ 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 1aa888c8ff..904a98c8a3 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -261,14 +261,21 @@ def run_pipeline( intermediate_calls: T.List[T.Tuple[int, TimeoutCallback]] = [], ): loop = GLib.MainLoop() - remaining_timeouts = set() # type: set[int] + timeout_sources = set() # 
type: set[GLib.Source] + + assert ( + run_n_seconds is None or run_n_seconds >= 1 + ), "run_n_seconds must be >= 1 if specified" def gst_msg_handler(_, msg: Gst.Message): if msg.type == Gst.MessageType.EOS: - logger.info("Received EOS") + logger.debug("Received EOS.") loop.quit() pipeline.set_state(Gst.State.NULL) + for timeout in timeout_sources: + timeout.destroy() + if msg.type == Gst.MessageType.ERROR: logger.error( "Pipeline encountered an error, stopping. " @@ -277,13 +284,13 @@ def gst_msg_handler(_, msg: Gst.Message): loop.quit() pipeline.set_state(Gst.State.NULL) - for timeout in remaining_timeouts: + for timeout in timeout_sources: # if the pipeline is terminated early, remove all timers # because loop.quit() won't remove those # that are already scheduled => segfault (EOS on null pipeline) # calling source_remove may produce warnings, # but won't stop normal execution - GLib.source_remove(timeout) + timeout.destroy() if msg.type == Gst.MessageType.WARNING: logger.warning(Gst.Message.parse_warning(msg)) @@ -293,30 +300,39 @@ def send_eos(): pipeline.send_event(Gst.Event.new_eos()) if run_n_seconds: - remaining_timeouts.add( - GLib.timeout_add_seconds(run_n_seconds, send_eos) + eos_timeout_id = GLib.timeout_add_seconds(run_n_seconds, send_eos) + # get the actual source object, so we can call .destroy() + # removing a timeout by id will cause warnings if it doesn't exist + timeout_sources.add( + loop.get_context().find_source_by_id(eos_timeout_id) ) for delay, call in intermediate_calls: assert run_n_seconds is None or delay < run_n_seconds, ( "Delay for each call must be smaller than total run seconds, " - " (Got delay = {}, run_n_seconds = {})".format( - delay, run_n_seconds + " (Got delay = {} for {}, run_n_seconds = {})".format( + delay, call.__name__, run_n_seconds ) ) - remaining_timeouts.add(GLib.timeout_add_seconds(delay, call)) + timeout_id = GLib.timeout_add_seconds(delay, call) + timeout_sources.add(loop.get_context().find_source_by_id(timeout_id)) bus = pipeline.get_bus() bus.add_signal_watch() bus.connect("message", gst_msg_handler) pipeline.set_state(Gst.State.PLAYING) - source_state = pipeline.get_child_by_index(0).get_state(1 * Gst.SECOND)[0] - if source_state != Gst.StateChangeReturn.SUCCESS: + # get_state returns (state_change_result, curr_state, target_state) + source_state_change_result = pipeline.get_child_by_index(0).get_state( + 500 * Gst.MSECOND + )[0] + if source_state_change_result != Gst.StateChangeReturn.SUCCESS: pipeline.set_state(Gst.State.NULL) raise RuntimeError( "Failed to transition to playing state. " - "Source is still in {} state after 1 second.".format(source_state) + "Source is still in {} state after 500ms.".format( + source_state_change_result + ) ) logger.info("[ OK ] Pipeline is playing!") @@ -422,7 +438,8 @@ def take_photo( mime_type = caps.get_structure(0).get_name() # type: str if mime_type == "image/jpeg": - # decodebin has funny clock problem with live sources in image/jpeg + # decodebin has a clock problem with pipewiresrc + # that outputs image/jpeg str_elements[1] = "jpegdec" elif mime_type == "video/x-raw": # don't need a decoder for raw @@ -455,18 +472,27 @@ def open_valve(): logger.debug("Opening valve!") valve.set_property("drop", False) - logger.info( - "Created photo pipeline with {} second delay. ".format(delay_seconds) - + '"{} ! 
{}"'.format(elem_to_str(source), partial) - ) - logger.debug("Setting playing state") + delay_seconds = max(delay_seconds, 0) + if delay_seconds <= 0: + logger.info( + "Created photo pipeline with no delay. " + + '"{} ! {}"'.format(elem_to_str(source), partial) + ) + valve.set_property("drop", False) + intermediate_calls = [] + else: + logger.info( + "Created photo pipeline with {} second delay. ".format( + delay_seconds + ) + + '"{} ! {}"'.format(elem_to_str(source), partial) + ) + intermediate_calls = [(delay_seconds, open_valve)] run_pipeline( pipeline, - delay_seconds + 1, # workaround for now, weird problem with ref count - intermediate_calls=[ - (delay_seconds, open_valve), - ], + delay_seconds + 1, + intermediate_calls=intermediate_calls, ) logger.info("[ OK ] Photo was saved to {}".format(file_path)) From 1a84ff9e1e1243ee66b00a812a79f105c151c411 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 23:30:00 +0800 Subject: [PATCH 56/79] style: update comments --- .../checkbox_support/camera_pipelines.py | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 904a98c8a3..6d12581216 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -200,7 +200,12 @@ def get_all_fixated_caps( break fixed_cap = caps_i.fixate() # type: Gst.Caps if len(fixed_caps) != 0 and fixed_cap.is_equal(fixed_caps[-1]): - # if the caps is already seen + # if the caps is already seen last time, + # we are probably stuck at an unresolvable value + # can happen e.g when we have framerate = [1/3, 1/4] + # - doesn't contain any known value + # - fixate() will keep returning the same thing + # - subtract() does nothing break fixed_caps.append(fixed_cap) caps_i = caps_i.subtract(fixed_cap) @@ -221,7 +226,7 @@ def elem_to_str(element: Gst.Element) -> str: properties = element.list_properties() # list[GObject.GParamSpec] element_name = element.get_factory().get_name() - exclude = ["client-name"] + exclude = ["parent", "client-name"] prop_strings = [] # type: list[str] for prop in properties: @@ -286,10 +291,9 @@ def gst_msg_handler(_, msg: Gst.Message): for timeout in timeout_sources: # if the pipeline is terminated early, remove all timers - # because loop.quit() won't remove those + # because loop.quit() won't remove/stop those # that are already scheduled => segfault (EOS on null pipeline) - # calling source_remove may produce warnings, - # but won't stop normal execution + # See: https://docs.gtk.org/glib/method.MainLoop.quit.html timeout.destroy() if msg.type == Gst.MessageType.WARNING: @@ -301,8 +305,11 @@ def send_eos(): if run_n_seconds: eos_timeout_id = GLib.timeout_add_seconds(run_n_seconds, send_eos) - # get the actual source object, so we can call .destroy() - # removing a timeout by id will cause warnings if it doesn't exist + # get the actual source object, so we can call .destroy() later. 
+ # Removing a timeout by id will cause warnings if it doesn't exist, + # but destroying an unused source is ok + # See: https://docs.gtk.org/glib/method.Source.destroy.html + # and: https://docs.gtk.org/glib/type_func.Source.remove.html timeout_sources.add( loop.get_context().find_source_by_id(eos_timeout_id) ) @@ -445,6 +452,8 @@ def take_photo( # don't need a decoder for raw str_elements[1] = "" elif mime_type == "video/x-bayer": + # bayer2rgb is not considered a decoder + # so decodebin can't automatically find this str_elements[1] = "bayer2rgb" # else case is using decodebin as a fallback else: @@ -495,7 +504,11 @@ def open_valve(): intermediate_calls=intermediate_calls, ) - logger.info("[ OK ] Photo was saved to {}".format(file_path)) + logger.info( + '[ OK ] Photo pipeline for "{}" capability has finished!'.format( + caps.to_string() if caps else "device default" + ) + ) def record_video( @@ -510,8 +523,8 @@ def record_video( 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 "videoconvert name=converter", # 2 - "encodebin profile={}".format(encoding_profile), - "filesink location={}".format(file_path), # 5 + "encodebin profile={}".format(encoding_profile), # 3 + "filesink location={}".format(file_path), # 4 ] head_elem_name = "source-caps" @@ -527,11 +540,8 @@ def record_video( elif mime_type == "video/x-raw": str_elements[1] = "" elif mime_type == "video/x-bayer": - # bayer2rgb is not considered a decoder - # so decodebin can't automatically find this str_elements[1] = "bayer2rgb" else: - # decodebin doesn't work with video/x-raw str_elements[0] = str_elements[1] = "" head_elem_name = "converter" @@ -565,13 +575,3 @@ def record_video( + "{}".format(caps.to_string() if caps else "[device default]") + " was saved to {}".format(file_path) ) - - """record - videotestsrc num-buffers=120 ! - queue ! - encodebin profile="video/quicktime,variant=iso:video/x-h264" ! - multifilesink location=video.mp4 - """ - """decode - filesrc location=video.mp4 ! decodebin ! autovideosink - """ From 81d4211e172d16d0601f42eb392d4fa90e13d8ed Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Thu, 2 Jan 2025 23:43:30 +0800 Subject: [PATCH 57/79] refactor: minor refactor for msg handler --- .../checkbox_support/camera_pipelines.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 6d12581216..bbf2a7030f 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -3,6 +3,8 @@ import typing as T import logging +from numpy import True_ + logger = logging.getLogger(__name__) logging.basicConfig( @@ -273,19 +275,22 @@ def run_pipeline( ), "run_n_seconds must be >= 1 if specified" def gst_msg_handler(_, msg: Gst.Message): + should_quit = False + if msg.type == Gst.MessageType.WARNING: + logger.warning(Gst.Message.parse_warning(msg)) + if msg.type == Gst.MessageType.EOS: logger.debug("Received EOS.") - loop.quit() - pipeline.set_state(Gst.State.NULL) - - for timeout in timeout_sources: - timeout.destroy() + should_quit = True if msg.type == Gst.MessageType.ERROR: logger.error( "Pipeline encountered an error, stopping. 
" + str(Gst.Message.parse_error(msg)) ) + should_quit = True + + if should_quit: loop.quit() pipeline.set_state(Gst.State.NULL) @@ -296,9 +301,6 @@ def gst_msg_handler(_, msg: Gst.Message): # See: https://docs.gtk.org/glib/method.MainLoop.quit.html timeout.destroy() - if msg.type == Gst.MessageType.WARNING: - logger.warning(Gst.Message.parse_warning(msg)) - def send_eos(): logger.debug("Sending EOS.") pipeline.send_event(Gst.Event.new_eos()) From 6469f08513abea72f38694d34d1b2c86fdf03ca2 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Fri, 3 Jan 2025 10:29:02 +0800 Subject: [PATCH 58/79] style: rename remap to known_values --- .../checkbox_support/camera_pipelines.py | 52 +++++++++---------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index bbf2a7030f..0147bf39ec 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -3,9 +3,6 @@ import typing as T import logging -from numpy import True_ - - logger = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s %(levelname)s - %(message)s\n", @@ -33,7 +30,7 @@ class CapsResolver: # (top, bottom) or (numerator, denominator) FractionTuple = T.Tuple[int, int] - RANGE_REMAP = { + KNOWN_RANGE_VALUES = { "width": [640, 1280, 1920, 2560, 3840], "height": [480, 720, 1080, 1440, 2160], "framerate": [ @@ -86,16 +83,16 @@ def extract_int_range( return low.get_int(prop_name)[1], high.get_int(prop_name)[1] @T.overload - def remap_range_to_list( + def select_known_values_from_range( self, prop: str, low: int, high: int ) -> T.List[int]: ... @T.overload - def remap_range_to_list( + def select_known_values_from_range( self, prop: str, low: FractionTuple, high: FractionTuple ) -> T.List[FractionTuple]: ... 
- def remap_range_to_list( + def select_known_values_from_range( self, prop: str, low: T.Union[int, FractionTuple], @@ -110,10 +107,10 @@ def remap_range_to_list( """ out = [] assert ( - prop in self.RANGE_REMAP - ), "Property {} does not have a remap definition".format(prop) + prop in self.KNOWN_RANGE_VALUES + ), "Property {} does not have a known value definition".format(prop) - for val in self.RANGE_REMAP[prop]: + for val in self.KNOWN_RANGE_VALUES[prop]: # lt gt are defined as pairwise comparison on tuples if val >= low and val <= high: out.append(val) @@ -123,7 +120,7 @@ def remap_range_to_list( def get_all_fixated_caps( self, caps: Gst.Caps, - resolve_method: str, # type T.Literal["remap", "limit"] + resolve_method: str, # type T.Literal["known_values", "limit"] limit: int = 10000, ) -> T.List[Gst.Caps]: """Gets all the fixated(1 value per property) caps from a Gst.Caps obj @@ -131,11 +128,11 @@ def get_all_fixated_caps( :param caps: a mixed Gst.Caps :param resolve_method: how to resolve IntRange and FractionRange values - Only applies to width, height, and framerate for now - - "remap" => picks out a set of common values within the original range + - "known_values" => picks out known values within the original range - "limit" => Use the caps.is_fixed while loop until we reaches limit :param limit: the limit to use for the "limit" resolver - - if resolve method is remap, this is still in effect + - if resolve method is known_values, this is still in effect :return: a list of fixed caps """ if caps.is_fixed(): @@ -145,22 +142,22 @@ def get_all_fixated_caps( for i in range(caps.get_size()): struct = caps.get_structure(i) - caps_i = Gst.Caps.from_string(struct.to_string()) # type: Gst.Caps - if resolve_method == "remap": - for prop in self.RANGE_REMAP.keys(): - s_i = caps_i.get_structure(0) # type: Gst.Structure + if resolve_method == "known_values": + for prop in self.KNOWN_RANGE_VALUES.keys(): finite_list = None # type: GObject.ValueArray | None - if s_i.has_field_typed(prop, Gst.IntRange): - low, high = self.extract_int_range(s_i, prop) + if struct.has_field_typed(prop, Gst.IntRange): + low, high = self.extract_int_range(struct, prop) finite_list = GObject.ValueArray() - for elem in self.remap_range_to_list(prop, low, high): + for elem in self.select_known_values_from_range( + prop, low, high + ): finite_list.append(elem) # type: ignore - elif s_i.has_field_typed(prop, Gst.FractionRange): - low, high = self.extract_fraction_range(s_i, prop) - fraction_list = self.remap_range_to_list( + elif struct.has_field_typed(prop, Gst.FractionRange): + low, high = self.extract_fraction_range(struct, prop) + fraction_list = self.select_known_values_from_range( prop, low, high ) # workaround missing Gst.Fraction @@ -184,19 +181,20 @@ def get_all_fixated_caps( if finite_list is not None: if finite_list.n_values == 0: - print( - "Resolve method is remap," + logger.debug( + "Resolve method is known_values," "but original caps doesn't have any", "of the common values.", "Skipping.", ) - s_i.set_list( + struct.set_list( prop, finite_list, ) - caps_i = Gst.Caps.from_string(s_i.to_string()) + caps_i = Gst.Caps.from_string(struct.to_string()) + caps_i = Gst.Caps.from_string(struct.to_string()) # type: Gst.Caps while not caps_i.is_fixed() and not caps_i.is_empty(): if len(fixed_caps) >= limit: break From acfd78aed27d0fe04bea68cd051b962ab6c8eefe Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Fri, 3 Jan 2025 13:49:48 +0800 Subject: [PATCH 59/79] test: test 
job tests --- .../tests/test_camera_test_auto_gst_source.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 providers/base/tests/test_camera_test_auto_gst_source.py diff --git a/providers/base/tests/test_camera_test_auto_gst_source.py b/providers/base/tests/test_camera_test_auto_gst_source.py new file mode 100644 index 0000000000..438ba8723c --- /dev/null +++ b/providers/base/tests/test_camera_test_auto_gst_source.py @@ -0,0 +1,72 @@ +import unittest as ut +from unittest.mock import MagicMock, patch +from shlex import split as sh_split +import sys + + +mock_gi = MagicMock() + + +@patch.dict( + sys.modules, + { + "gi": mock_gi, + "gi.repository": mock_gi.repository, + "logging": MagicMock(), + }, +) +class CameraTestAutoGstSourceTests(ut.TestCase): + def test_correct_subcommand_is_executed(self): + import camera_test_auto_gst_source as CTAGS + + with patch("os.path.isdir") as mock_isdir, patch( + "os.path.isfile" + ) as mock_isfile, patch( + "camera_test_auto_gst_source.get_devices" + ) as mock_get_devices, patch( + "camera_test_auto_gst_source.cam" + ) as mock_cam, patch( + "camera_test_auto_gst_source.MediaValidator" + ) as mock_validator: + mock_isdir.return_value = True + mock_isfile.return_value = True + mock_get_devices.return_value = [MagicMock()] + mock_validator.validate_image_dimensions.return_value = True + + # print(dir(mock_cam.take_photo)) + mock_resolver = MagicMock() + mock_cam.CapsResolver.return_value = mock_resolver + mock_resolver.get_all_fixated_caps.return_value = [MagicMock()] + + with patch( + "sys.argv", + sh_split( + "camera_test_auto_gst_source.py take-photo -p some/dir" + ), + ): + CTAGS.main() + print(dir(mock_cam.take_photo)) + self.assertEqual(mock_cam.take_photo.call_count, 1) + + with patch( + "sys.argv", + sh_split( + "camera_test_auto_gst_source.py record-video " + "-p some/dir --encoding mp4_h264" + ), + ): + CTAGS.main() + self.assertEqual(mock_cam.record_video.call_count, 1) + + with patch( + "sys.argv", + sh_split("camera_test_auto_gst_source.py show-viewfinder"), + ): + CTAGS.main() + self.assertEqual(mock_cam.show_viewfinder.call_count, 1) + def test_correct_subcommand_is_executed(self): + ... 
+ + +if __name__ == "__main__": + ut.main() From 180454ab8270221fdaf07d5adb178b1979925b60 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 6 Jan 2025 11:36:48 +0800 Subject: [PATCH 60/79] fix: support custom file format, always use abs path --- .../base/bin/camera_test_auto_gst_source.py | 87 ++++++++++++++----- 1 file changed, 66 insertions(+), 21 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 4cc8a3b9c0..6ebd3d46c7 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -30,9 +30,12 @@ class MediaValidator: def validate_image_dimensions( image_file_path: str, *, expected_width: int, expected_height: int ) -> bool: - assert os.path.isfile( - image_file_path - ), "Image file doesn't exist at {}".format(image_file_path) + if not os.path.isfile(image_file_path): + logger.error( + "Image file doesn't exist at {}".format(image_file_path) + ) + return False + image = PIL.Image.open(image_file_path) passed = True @@ -61,8 +64,14 @@ def validate_video_info( expected_height: int, expected_duration_seconds: int, expected_fps: int, - duration_tolerance_seconds=0.1 + duration_tolerance_seconds: float ) -> bool: + if not os.path.isfile(video_file_path): + logger.error( + "Video file doesn't exist at {}".format(video_file_path) + ) + return False + discoverer = GstPbutils.Discoverer() video_file_path.lstrip("/") @@ -119,9 +128,18 @@ def validate_video_info( ENCODING_PROFILES = { - "mp4_h264": "video/quicktime,variant=iso:video/x-h264", - "ogv_theora": "application/ogg:video/x-theora", - "webm_vp8": "video/webm:video/x-vp8", + "mp4_h264": { + "profile_str": "video/quicktime,variant=iso:video/x-h264", + "file_extension": "mp4", + }, + "ogv_theora": { + "profile_str": "application/ogg:video/x-theora", + "file_extension": "ogv", + }, + "webm_vp8": { + "profile_str": "video/webm:video/x-vp8", + "file_extension": "webm", + }, } @@ -163,7 +181,7 @@ def parse_args(): action="store_true", help="Skip image dimension validation", ) - default_max_caps = 100 + default_max_caps = 10000 photo_subparser.add_argument( "--max-caps", type=int, @@ -234,6 +252,16 @@ def parse_args(): "Only use this option if you have a custom encoding string." ), ) + video_subparser.add_argument( + "--file-extension", + type=str, + help=( + "Custom file extension. " + "This is required when --custom-encoding-string is specified. " + "If --encoding is specified, " + "this overrides the default file extension." + ), + ) # need to explicitly check this viewfinder_subparser = subparser.add_parser("show-viewfinder") default_viewfinder_seconds = 10 @@ -261,15 +289,19 @@ def parse_args(): def main(): args = parse_args() - + print(args) if os.getuid() == 0: logger.warning( "Running this script as root. " "This may lead to different results than running as regular user." 
) + abs_path = os.path.abspath( + os.path.expanduser(os.path.expandvars(args.path)) + ) + if args.subcommand == "play-video": - cam.play_video(args.path) + cam.play_video(abs_path) return devices = get_devices() @@ -297,16 +329,16 @@ def main(): cam.show_viewfinder(dev_element, show_n_seconds=args.seconds) continue - if not os.path.isdir(args.path): + if not os.path.isdir(abs_path): # must validate early # multifilesink does not check if the path exists raise FileNotFoundError( - 'Path "{}" does not exist'.format(args.path) + 'Path "{}" does not exist'.format(abs_path) ) resolver = cam.CapsResolver() all_fixed_caps = resolver.get_all_fixated_caps( - device.get_caps(), "remap" + device.get_caps(), "known_values" ) logger.info("Testing device {}/{}".format(dev_i + 1, len(devices))) @@ -330,7 +362,7 @@ def main(): + '"{}"'.format(device.get_display_name()), ) file_path = "{}/photo_dev_{}_cap_{}.jpeg".format( - args.path, dev_i, cap_i + abs_path, dev_i, cap_i ) cam.take_photo( dev_element, @@ -348,19 +380,32 @@ def main(): expected_height=cap_struct.get_int("height").value, ) elif args.subcommand == "record-video": - file_path = "{}/video_dev_{}_cap_{}.mp4".format( - args.path, dev_i, cap_i + if args.encoding is not None: + encoding_profile = ENCODING_PROFILES[args.encoding][ + "profile_str" + ] + file_extension = ENCODING_PROFILES[args.encoding][ + "file_extension" + ] + if args.file_extension is not None: + file_extension = args.file_extension + else: + encoding_profile = args.custom_encoding_string + assert args.file_extension, ( + "File extension must be specified " + "when using custom encoding string" + ) + file_extension = args.file_extension + + file_path = "{}/video_dev_{}_cap_{}.{}".format( + abs_path, dev_i, cap_i, file_extension ) cam.record_video( dev_element, file_path=file_path, caps=capability, record_n_seconds=args.seconds, - encoding_profile=( - ENCODING_PROFILES[args.encoding] - if hasattr(args, "encoding") - else args.custom_encoding_string - ), + encoding_profile=encoding_profile, ) if args.skip_validation: From d7eae4e9127c4117f4b1705ed2f05f46911e3b7a Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:37:40 +0800 Subject: [PATCH 61/79] fix: do not check file path for viewfinder, comments --- .../checkbox_support/camera_pipelines.py | 14 +++++++++----- providers/base/bin/camera_test_auto_gst_source.py | 11 ++++++----- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 0147bf39ec..cccf95bc45 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -153,7 +153,7 @@ def get_all_fixated_caps( for elem in self.select_known_values_from_range( prop, low, high ): - finite_list.append(elem) # type: ignore + finite_list.append(elem) elif struct.has_field_typed(prop, Gst.FractionRange): low, high = self.extract_fraction_range(struct, prop) @@ -330,15 +330,19 @@ def send_eos(): pipeline.set_state(Gst.State.PLAYING) # get_state returns (state_change_result, curr_state, target_state) + # the 1st element isn't named, so we must access by index source_state_change_result = pipeline.get_child_by_index(0).get_state( 500 * Gst.MSECOND - )[0] - if source_state_change_result != Gst.StateChangeReturn.SUCCESS: + ) + if source_state_change_result[0] != Gst.StateChangeReturn.SUCCESS: pipeline.set_state(Gst.State.NULL) raise 
RuntimeError( "Failed to transition to playing state. " - "Source is still in {} state after 500ms.".format( - source_state_change_result + + "Source is still in {} state after 500ms, ".format( + source_state_change_result.state + ) + + "was trying to transition to {}".format( + source_state_change_result.pending ) ) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 6ebd3d46c7..7315beedd1 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -289,18 +289,16 @@ def parse_args(): def main(): args = parse_args() - print(args) if os.getuid() == 0: logger.warning( "Running this script as root. " "This may lead to different results than running as regular user." ) - abs_path = os.path.abspath( - os.path.expanduser(os.path.expandvars(args.path)) - ) - if args.subcommand == "play-video": + abs_path = os.path.abspath( + os.path.expanduser(os.path.expandvars(args.path)) + ) cam.play_video(abs_path) return @@ -329,6 +327,9 @@ def main(): cam.show_viewfinder(dev_element, show_n_seconds=args.seconds) continue + abs_path = os.path.abspath( + os.path.expanduser(os.path.expandvars(args.path)) + ) if not os.path.isdir(abs_path): # must validate early # multifilesink does not check if the path exists From 713e47d8cf89a1e48228166006f9f0a333490724 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:55:35 +0800 Subject: [PATCH 62/79] refactor: remove unused video player --- .../checkbox_support/camera_pipelines.py | 22 ------------------- .../base/bin/camera_test_auto_gst_source.py | 18 --------------- 2 files changed, 40 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index cccf95bc45..5fa0103e00 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -350,28 +350,6 @@ def send_eos(): loop.run() -def play_video(filepath: str): - global Gtk - if not Gtk: - gi.require_version("Gtk", "3.0") - from gi.repository import Gtk as _Gtk # type: ignore - - Gtk = _Gtk - Gtk.init([]) - - pipeline = Gst.parse_launch( - " ! ".join( - [ - "filesrc location={}".format(filepath), - "decodebin", - "videoconvert", - "autovideosink", - ] - ) - ) - run_pipeline(pipeline) - - def show_viewfinder(source: Gst.Element, *, show_n_seconds=5): """Shows a viewfinder for the given camera source diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 7315beedd1..95a79338d5 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -15,8 +15,6 @@ ) logger.setLevel(logging.DEBUG) -Gtk = None - gi.require_version("Gst", "1.0") gi.require_version("GstPbutils", "1.0") from gi.repository import Gst, GstPbutils # type: ignore # noqa: E402 @@ -275,15 +273,6 @@ def parse_args(): default=default_viewfinder_seconds, ) - player_subparser = subparser.add_parser("play-video") - player_subparser.add_argument( - "-p", - "--path", - type=str, - help="Path to the video file", - required=True, - ) - return parser.parse_args() @@ -295,13 +284,6 @@ def main(): "This may lead to different results than running as regular user." 
) - if args.subcommand == "play-video": - abs_path = os.path.abspath( - os.path.expanduser(os.path.expandvars(args.path)) - ) - cam.play_video(abs_path) - return - devices = get_devices() if len(devices) == 0: From d22187857cf41741be5fb6d1c6bddcf31b9a2702 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 6 Jan 2025 16:33:27 +0800 Subject: [PATCH 63/79] test: image validator tests --- .../tests/test_camera_test_auto_gst_source.py | 53 +++++++++++++++++-- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/providers/base/tests/test_camera_test_auto_gst_source.py b/providers/base/tests/test_camera_test_auto_gst_source.py index 438ba8723c..13baa05c49 100644 --- a/providers/base/tests/test_camera_test_auto_gst_source.py +++ b/providers/base/tests/test_camera_test_auto_gst_source.py @@ -17,7 +17,6 @@ ) class CameraTestAutoGstSourceTests(ut.TestCase): def test_correct_subcommand_is_executed(self): - import camera_test_auto_gst_source as CTAGS with patch("os.path.isdir") as mock_isdir, patch( "os.path.isfile" @@ -28,6 +27,8 @@ def test_correct_subcommand_is_executed(self): ) as mock_cam, patch( "camera_test_auto_gst_source.MediaValidator" ) as mock_validator: + import camera_test_auto_gst_source as CTAGS + mock_isdir.return_value = True mock_isfile.return_value = True mock_get_devices.return_value = [MagicMock()] @@ -45,7 +46,6 @@ def test_correct_subcommand_is_executed(self): ), ): CTAGS.main() - print(dir(mock_cam.take_photo)) self.assertEqual(mock_cam.take_photo.call_count, 1) with patch( @@ -64,8 +64,53 @@ def test_correct_subcommand_is_executed(self): ): CTAGS.main() self.assertEqual(mock_cam.show_viewfinder.call_count, 1) - def test_correct_subcommand_is_executed(self): - ... + + @patch("os.path.isfile") + # @patch("gi.repository.GstPbutils.Discoverer") + @patch("camera_test_auto_gst_source.logger") + @patch("camera_test_auto_gst_source.Image") + def test_image_validator( + self, + mock_pil: MagicMock, + mock_logger: MagicMock, + mock_isfile: MagicMock, + ): + import camera_test_auto_gst_source as CTAGS + + expected_width = 640 + expected_height = 480 + + mock_isfile.return_value = True + mock_pil.open().width = expected_width + mock_pil.open().height = expected_height + + validator = CTAGS.MediaValidator() + + self.assertTrue( + validator.validate_image_dimensions( + "some/path", + expected_height=expected_height, + expected_width=expected_width, + ) + ) + + bad_width = 1237219831 + mock_pil.open().width = bad_width + mock_pil.open().height = expected_height + + self.assertFalse( + validator.validate_image_dimensions( + "some/path", + expected_height=expected_height, + expected_width=expected_width, + ) + ) + + mock_logger.error.assert_called_with( + "Image width mismatch. 
Expected = {}, actual = {}".format( + expected_width, bad_width + ) + ) if __name__ == "__main__": From 900193289a6901bfe53f6742d11c1a4fc08e15b7 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 6 Jan 2025 16:35:42 +0800 Subject: [PATCH 64/79] style: bad log message ordering --- checkbox-support/checkbox_support/camera_pipelines.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 5fa0103e00..a5841677aa 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -465,12 +465,12 @@ def open_valve(): delay_seconds = max(delay_seconds, 0) if delay_seconds <= 0: + valve.set_property("drop", False) + intermediate_calls = [] logger.info( "Created photo pipeline with no delay. " + '"{} ! {}"'.format(elem_to_str(source), partial) ) - valve.set_property("drop", False) - intermediate_calls = [] else: logger.info( "Created photo pipeline with {} second delay. ".format( From 41fc8f6134bbe1c8690567c665413ea38432bbec Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 6 Jan 2025 17:01:09 +0800 Subject: [PATCH 65/79] feat: allow 0 second photo pipelines --- .../checkbox_support/camera_pipelines.py | 38 +++++++++++-------- .../base/bin/camera_test_auto_gst_source.py | 12 ++++-- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index a5841677aa..920502a534 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -268,9 +268,11 @@ def run_pipeline( loop = GLib.MainLoop() timeout_sources = set() # type: set[GLib.Source] + # 0 means send eos as soon as possible + # we can do this because MainLoop doesn't start until pipeline has started assert ( - run_n_seconds is None or run_n_seconds >= 1 - ), "run_n_seconds must be >= 1 if specified" + run_n_seconds is None or run_n_seconds >= 0 + ), "run_n_seconds must be >= 0 if specified" def gst_msg_handler(_, msg: Gst.Message): should_quit = False @@ -303,7 +305,7 @@ def send_eos(): logger.debug("Sending EOS.") pipeline.send_event(Gst.Event.new_eos()) - if run_n_seconds: + if run_n_seconds is not None: eos_timeout_id = GLib.timeout_add_seconds(run_n_seconds, send_eos) # get the actual source object, so we can call .destroy() later. # Removing a timeout by id will cause warnings if it doesn't exist, @@ -412,8 +414,8 @@ def take_photo( 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 "videoconvert name=converter", # 2 - "valve name=photo-valve drop=True", # 4 - "jpegenc", # 3 + "valve name=photo-valve drop=True", # 3 + "jpegenc", # 4 "multifilesink location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -443,10 +445,13 @@ def take_photo( str_elements[0] = str_elements[1] = str_elements[3] = "" head_elem_name = "converter" + delay_seconds = max(delay_seconds, 0) + if delay_seconds == 0: + str_elements[3] = "" + partial = " ! 
".join(elem for elem in str_elements if elem) pipeline = Gst.parse_launch(partial) # type: Gst.Pipeline head_elem = pipeline.get_by_name(head_elem_name) - valve = pipeline.get_by_name("photo-valve") # parse the partial pipeline, then get head element by name assert pipeline.add( @@ -454,35 +459,36 @@ def take_photo( ), "Could not add source element {} to the pipeline".format( elem_to_str(source) ) - assert head_elem and valve + assert head_elem assert source.link( head_elem ), "Could not link source element to {}".format(head_elem) - def open_valve(): - logger.debug("Opening valve!") - valve.set_property("drop", False) - - delay_seconds = max(delay_seconds, 0) - if delay_seconds <= 0: - valve.set_property("drop", False) + if delay_seconds == 0: intermediate_calls = [] logger.info( "Created photo pipeline with no delay. " + '"{} ! {}"'.format(elem_to_str(source), partial) ) else: + valve = pipeline.get_by_name("photo-valve") + + def open_valve(): + assert valve + logger.debug("Opening valve!") + valve.set_property("drop", False) + + intermediate_calls = [(delay_seconds, open_valve)] logger.info( "Created photo pipeline with {} second delay. ".format( delay_seconds ) + '"{} ! {}"'.format(elem_to_str(source), partial) ) - intermediate_calls = [(delay_seconds, open_valve)] run_pipeline( pipeline, - delay_seconds + 1, + delay_seconds, intermediate_calls=intermediate_calls, ) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 95a79338d5..5476955350 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -299,7 +299,13 @@ def main(): print( '[ HINT ] For debugging, remove the "valve" element to get a pipeline', 'that can be run with "gst-launch-1.0".', - "Also keep the pipeline running for {} seconds".format(args.seconds), + ( + "Also keep the pipeline running for {} seconds.\n".format( + args.seconds + ) + if args.seconds > 0 + else "Terminate the pipeline as soon as possible.\n" + ), ) for dev_i, device in enumerate(devices): @@ -325,9 +331,9 @@ def main(): ) logger.info("Testing device {}/{}".format(dev_i + 1, len(devices))) - logger.info( + logger.info( # just an estimate "Test for this device may take {} seconds for {} caps.".format( - len(all_fixed_caps) * args.seconds, len(all_fixed_caps) + len(all_fixed_caps) * max(args.seconds, 1), len(all_fixed_caps) ) ) From b75dcd9e1396d3df461ad682fe7ea2fc69d20f6f Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Mon, 6 Jan 2025 17:09:40 +0800 Subject: [PATCH 66/79] fix: disallow 0 second pipeline in video recording --- .../checkbox_support/camera_pipelines.py | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 920502a534..674dbcc23a 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -493,9 +493,9 @@ def open_valve(): ) logger.info( - '[ OK ] Photo pipeline for "{}" capability has finished!'.format( - caps.to_string() if caps else "device default" - ) + "[ OK ] Photo pipeline for this capability: " + + "{}".format(caps.to_string() if caps else "device default") + + " has finished!" 
) @@ -507,6 +507,11 @@ def record_video( record_n_seconds: int, encoding_profile: str ): + assert record_n_seconds >= 1, ( + "Recording pipeline must run for at least 1 second. " + "Got {} seconds.".format(record_n_seconds) + ) + str_elements = [ 'capsfilter name=source-caps caps="{}"', # 0 "decodebin", # 1 @@ -549,17 +554,16 @@ def record_video( ), "Could not link source element to {}".format(head_elem) logger.info( - "[ OK ] Created video pipeline to record {} seconds".format( + "[ OK ] Created video pipeline to record {} seconds: ".format( record_n_seconds ) + + '"{} ! {}"'.format(elem_to_str(source), partial) ) - logger.info("{} ! {}".format(elem_to_str(source), partial)) - logger.debug("Setting playing state") run_pipeline(pipeline, record_n_seconds) logger.info( - "[ OK ] Video for this capability: " - + "{}".format(caps.to_string() if caps else "[device default]") - + " was saved to {}".format(file_path) + "[ OK ] Recording pipeline for this capability: " + + "{}".format(caps.to_string() if caps else "device default") + + " has finished!" ) From 3f7bcbe4bcdd1faaf2512ddf3aa079a700655a45 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:10:29 +0800 Subject: [PATCH 67/79] feat: only 1 image write when taking photos --- .../checkbox_support/camera_pipelines.py | 37 ++++++++++++++----- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 674dbcc23a..e13f034f4f 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -264,18 +264,18 @@ def run_pipeline( pipeline: Gst.Pipeline, run_n_seconds: T.Optional[int] = None, intermediate_calls: T.List[T.Tuple[int, TimeoutCallback]] = [], + custom_quit_handler: T.Optional[T.Callable[[Gst.Message], bool]] = None, ): loop = GLib.MainLoop() timeout_sources = set() # type: set[GLib.Source] - # 0 means send eos as soon as possible - # we can do this because MainLoop doesn't start until pipeline has started assert ( - run_n_seconds is None or run_n_seconds >= 0 - ), "run_n_seconds must be >= 0 if specified" + run_n_seconds is None or run_n_seconds >= 1 + ), "run_n_seconds must be >= 1 if specified" def gst_msg_handler(_, msg: Gst.Message): should_quit = False + if msg.type == Gst.MessageType.WARNING: logger.warning(Gst.Message.parse_warning(msg)) @@ -290,10 +290,12 @@ def gst_msg_handler(_, msg: Gst.Message): ) should_quit = True + if custom_quit_handler: + should_quit = custom_quit_handler(msg) + if should_quit: loop.quit() pipeline.set_state(Gst.State.NULL) - for timeout in timeout_sources: # if the pipeline is terminated early, remove all timers # because loop.quit() won't remove/stop those @@ -305,11 +307,11 @@ def send_eos(): logger.debug("Sending EOS.") pipeline.send_event(Gst.Event.new_eos()) - if run_n_seconds is not None: + if run_n_seconds: eos_timeout_id = GLib.timeout_add_seconds(run_n_seconds, send_eos) # get the actual source object, so we can call .destroy() later. 
# Removing a timeout by id will cause warnings if it doesn't exist, - # but destroying an unused source is ok + # but destroying an already destroyed source is ok # See: https://docs.gtk.org/glib/method.Source.destroy.html # and: https://docs.gtk.org/glib/type_func.Source.remove.html timeout_sources.add( @@ -391,6 +393,23 @@ def show_viewfinder(source: Gst.Element, *, show_n_seconds=5): ) +def msg_is_multifilesink_save(msg: Gst.Message) -> bool: + """Returns true when multifilesink saves a buffer + + :param msg: the GstMessage object + :return: whether msg is a multifilesink save message + """ + if msg.type == Gst.MessageType.ELEMENT: + struct = msg.get_structure() + return ( + struct is not None + and struct.get_name() == "GstMultiFileSink" + and struct.has_field("filename") + ) + else: + return False + + def take_photo( source: Gst.Element, *, @@ -416,7 +435,7 @@ def take_photo( "videoconvert name=converter", # 2 "valve name=photo-valve drop=True", # 3 "jpegenc", # 4 - "multifilesink location={}".format(file_path), # 5 + "multifilesink post-messages=True location={}".format(file_path), # 5 ] head_elem_name = "source-caps" @@ -488,8 +507,8 @@ def open_valve(): run_pipeline( pipeline, - delay_seconds, intermediate_calls=intermediate_calls, + custom_quit_handler=msg_is_multifilesink_save, ) logger.info( From 931d157e82b87cb17e29ad451ea365ce74cf633d Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Tue, 7 Jan 2025 13:42:54 +0800 Subject: [PATCH 68/79] style: comments --- .../checkbox_support/camera_pipelines.py | 33 +++++++++++++++---- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index e13f034f4f..015db6c311 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -216,17 +216,19 @@ def get_all_fixated_caps( return fixed_caps -def elem_to_str(element: Gst.Element) -> str: +def elem_to_str( + element: Gst.Element, exclude: T.List[str] = ["parent", "client-name"] +) -> str: """Prints an element to string - - Excluding parent & client name :param element: GStreamer element - :return: String representation + :param exclude: property names to exclude + :return: String representation. This is a best guess for debug purposes, + not 100% accurate since there can be arbitrary objects in properties. 
""" properties = element.list_properties() # list[GObject.GParamSpec] element_name = element.get_factory().get_name() - exclude = ["parent", "client-name"] prop_strings = [] # type: list[str] for prop in properties: @@ -266,6 +268,21 @@ def run_pipeline( intermediate_calls: T.List[T.Tuple[int, TimeoutCallback]] = [], custom_quit_handler: T.Optional[T.Callable[[Gst.Message], bool]] = None, ): + """Runs a GStreamer pipeline and handle Gst messages + + :param pipeline: the pipeline to run + :param run_n_seconds: Number of seconds to run the pipeline + before sending EOS, defaults to None + - If None, only wait for an EOS signal + :param intermediate_calls: list of functions to run + while the pipeline is running + - Each element is a (delay, callback) tuple + - Delay is the number of seconds to wait + (relative to the start of the pipeline) before calling the callback + :param custom_quit_handler: quit the pipeline if this function returns true + :raises RuntimeError: if the source element did not transition to playing + state in 500ms after set_state(PLAYING) is called + """ loop = GLib.MainLoop() timeout_sources = set() # type: set[GLib.Source] @@ -297,7 +314,7 @@ def gst_msg_handler(_, msg: Gst.Message): loop.quit() pipeline.set_state(Gst.State.NULL) for timeout in timeout_sources: - # if the pipeline is terminated early, remove all timers + # if the pipeline is terminated early, remove all timers asap # because loop.quit() won't remove/stop those # that are already scheduled => segfault (EOS on null pipeline) # See: https://docs.gtk.org/glib/method.MainLoop.quit.html @@ -333,11 +350,13 @@ def send_eos(): bus.connect("message", gst_msg_handler) pipeline.set_state(Gst.State.PLAYING) - # get_state returns (state_change_result, curr_state, target_state) - # the 1st element isn't named, so we must access by index + source_state_change_result = pipeline.get_child_by_index(0).get_state( 500 * Gst.MSECOND ) + # get_state returns a 3-tuple + # (Gst.StateChangeReturn, curr_state: Gst.State, target_state: Gst.State) + # the 1st element isn't named, so we must access by index if source_state_change_result[0] != Gst.StateChangeReturn.SUCCESS: pipeline.set_state(Gst.State.NULL) raise RuntimeError( From 05ba217623dc7c0a18627268873920be647b89ac Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Tue, 7 Jan 2025 17:08:39 +0800 Subject: [PATCH 69/79] refactor: use discoverer for images too --- .../base/bin/camera_test_auto_gst_source.py | 31 +++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 5476955350..066d76860d 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -1,7 +1,6 @@ #! 
/usr/bin/python3 import os -import PIL.Image import gi from argparse import ArgumentParser import typing as T @@ -20,6 +19,7 @@ from gi.repository import Gst, GstPbutils # type: ignore # noqa: E402 gi.require_version("GLib", "2.0") +from gi.repository import GLib # type: ignore class MediaValidator: @@ -34,21 +34,35 @@ def validate_image_dimensions( ) return False - image = PIL.Image.open(image_file_path) - passed = True + discoverer = GstPbutils.Discoverer() + try: + info = discoverer.discover_uri("file://{}".format(image_file_path)) + except GLib.GError as e: + logger.error( + "Encountered an error when attempting to read {}.".format( + image_file_path + ) + + str(e) # cleaner message is in e.message + ) + return False - if image.width != expected_width: + image_video_stream = info.get_video_streams() + width = image_video_stream[0].get_width() # type: int + height = image_video_stream[0].get_height() # type: int + + passed = True + if width != expected_width: passed = False logger.error( "Image width mismatch. Expected = {}, actual = {}".format( - expected_width, image.width + expected_width, width ) ) - if image.height != expected_height: + if height != expected_height: passed = False logger.error( "Image height mismatch. Expected = {}, actual = {}".format( - expected_height, image.height + expected_height, height ) ) @@ -72,8 +86,7 @@ def validate_video_info( discoverer = GstPbutils.Discoverer() - video_file_path.lstrip("/") - info = discoverer.discover_uri("file://" + video_file_path) + info = discoverer.discover_uri("file://{}".format(video_file_path)) duration = info.get_duration() # type: int # This is in nanoseconds video_streams = info.get_video_streams() if len(video_streams) == 0: From ceaea0a602456d3b7b348abc95793a3486e4cb1c Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 10:08:35 +0800 Subject: [PATCH 70/79] fix: py3.5 quirks, error handling --- .../checkbox_support/camera_pipelines.py | 1 + .../base/bin/camera_test_auto_gst_source.py | 25 +++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 015db6c311..c80ef6ebc6 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -346,6 +346,7 @@ def send_eos(): timeout_sources.add(loop.get_context().find_source_by_id(timeout_id)) bus = pipeline.get_bus() + assert bus bus.add_signal_watch() bus.connect("message", gst_msg_handler) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 066d76860d..ff23d4fbe4 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -19,7 +19,7 @@ from gi.repository import Gst, GstPbutils # type: ignore # noqa: E402 gi.require_version("GLib", "2.0") -from gi.repository import GLib # type: ignore +from gi.repository import GLib # type: ignore # noqa: E402 class MediaValidator: @@ -37,7 +37,7 @@ def validate_image_dimensions( discoverer = GstPbutils.Discoverer() try: info = discoverer.discover_uri("file://{}".format(image_file_path)) - except GLib.GError as e: + except (GLib.GError, GLib.Error) as e: logger.error( "Encountered an error when attempting to read {}.".format( image_file_path @@ -86,7 +86,17 @@ def validate_video_info( discoverer = GstPbutils.Discoverer() - info = 
discoverer.discover_uri("file://{}".format(video_file_path)) + try: + info = discoverer.discover_uri("file://{}".format(video_file_path)) + except (GLib.GError, GLib.Error) as e: + logger.error( + "Encountered an error when attempting to read {}.".format( + video_file_path + ) + + str(e) + ) + return False + duration = info.get_duration() # type: int # This is in nanoseconds video_streams = info.get_video_streams() if len(video_streams) == 0: @@ -100,13 +110,13 @@ def validate_video_info( passed = True if ( - abs(duration - expected_duration_seconds * 10**9) - > duration_tolerance_seconds * 10**9 + abs(duration - expected_duration_seconds * Gst.SECOND) + > duration_tolerance_seconds * Gst.SECOND ): logger.error( "Duration not within tolerance. " "Got {}s, but expected {} +- {}s".format( - round(duration / (10**9), 3), + round(duration / Gst.SECOND, 3), expected_duration_seconds, duration_tolerance_seconds, ) @@ -168,7 +178,8 @@ def get_devices() -> T.List[Gst.Device]: def parse_args(): parser = ArgumentParser() - subparser = parser.add_subparsers(dest="subcommand", required=True) + subparser = parser.add_subparsers(dest="subcommand") + subparser.required = True # workaround for older python versions photo_subparser = subparser.add_parser("take-photo") default_wait_seconds = 2 photo_subparser.add_argument( From d107ab23149832a0c8e21e353c65bfbabaaf17d1 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 10:49:55 +0800 Subject: [PATCH 71/79] fix: custom handler should have lowest precedence --- checkbox-support/checkbox_support/camera_pipelines.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index c80ef6ebc6..4432767fd8 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -293,6 +293,10 @@ def run_pipeline( def gst_msg_handler(_, msg: Gst.Message): should_quit = False + if custom_quit_handler: + # has the lowest precedence, ERROR and EOS will always take over + should_quit = custom_quit_handler(msg) + if msg.type == Gst.MessageType.WARNING: logger.warning(Gst.Message.parse_warning(msg)) @@ -307,9 +311,6 @@ def gst_msg_handler(_, msg: Gst.Message): ) should_quit = True - if custom_quit_handler: - should_quit = custom_quit_handler(msg) - if should_quit: loop.quit() pipeline.set_state(Gst.State.NULL) @@ -531,6 +532,8 @@ def open_valve(): custom_quit_handler=msg_is_multifilesink_save, ) + # NOTE: reaching here just means the pipeline successfully stopped + # not necessarily stopped gracefully logger.info( "[ OK ] Photo pipeline for this capability: " + "{}".format(caps.to_string() if caps else "device default") From 5784fc575142a7672ca06192dfda2d634fd6d4f9 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 10:54:10 +0800 Subject: [PATCH 72/79] fix: remove timeout earlier --- checkbox-support/checkbox_support/camera_pipelines.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 4432767fd8..1f7da2fae3 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -313,13 +313,14 @@ def gst_msg_handler(_, msg: Gst.Message): if should_quit: loop.quit() - 
pipeline.set_state(Gst.State.NULL) for timeout in timeout_sources: # if the pipeline is terminated early, remove all timers asap # because loop.quit() won't remove/stop those # that are already scheduled => segfault (EOS on null pipeline) # See: https://docs.gtk.org/glib/method.MainLoop.quit.html timeout.destroy() + # setting NULL can be slow on certain encoders + pipeline.set_state(Gst.State.NULL) def send_eos(): logger.debug("Sending EOS.") From a8ca59a583f736958f793277f79dc389cc36a482 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:31:17 +0800 Subject: [PATCH 73/79] refactor: use path object --- .../checkbox_support/camera_pipelines.py | 11 +- .../base/bin/camera_test_auto_gst_source.py | 226 ++++++++++-------- 2 files changed, 128 insertions(+), 109 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 1f7da2fae3..578bd68c0a 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -2,6 +2,7 @@ import gi import typing as T import logging +from os import PathLike, fspath logger = logging.getLogger(__name__) logging.basicConfig( @@ -436,7 +437,7 @@ def take_photo( source: Gst.Element, *, caps: T.Optional[Gst.Caps] = None, - file_path: str, + file_path: PathLike, delay_seconds: int ): """Take a photo using the source element @@ -457,7 +458,9 @@ def take_photo( "videoconvert name=converter", # 2 "valve name=photo-valve drop=True", # 3 "jpegenc", # 4 - "multifilesink post-messages=True location={}".format(file_path), # 5 + "multifilesink post-messages=True location={}".format( + fspath(file_path) + ), # 5 ] head_elem_name = "source-caps" @@ -546,7 +549,7 @@ def record_video( source: Gst.Element, *, caps: T.Optional[Gst.Caps] = None, - file_path: str, + file_path: PathLike, record_n_seconds: int, encoding_profile: str ): @@ -560,7 +563,7 @@ def record_video( "decodebin", # 1 "videoconvert name=converter", # 2 "encodebin profile={}".format(encoding_profile), # 3 - "filesink location={}".format(file_path), # 4 + "filesink location={}".format(fspath(file_path)), # 4 ] head_elem_name = "source-caps" diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index ff23d4fbe4..adddaa2fee 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -1,11 +1,14 @@ #! 
/usr/bin/python3 import os +from tempfile import TemporaryDirectory import gi from argparse import ArgumentParser import typing as T import logging from checkbox_support import camera_pipelines as cam +from contextlib import nullcontext +from pathlib import Path logger = logging.getLogger(__name__) logging.basicConfig( @@ -26,7 +29,10 @@ class MediaValidator: @staticmethod def validate_image_dimensions( - image_file_path: str, *, expected_width: int, expected_height: int + image_file_path: os.PathLike, + *, + expected_width: int, + expected_height: int ) -> bool: if not os.path.isfile(image_file_path): logger.error( @@ -36,7 +42,9 @@ def validate_image_dimensions( discoverer = GstPbutils.Discoverer() try: - info = discoverer.discover_uri("file://{}".format(image_file_path)) + info = discoverer.discover_uri( + "file://{}".format(os.fspath(image_file_path)) + ) except (GLib.GError, GLib.Error) as e: logger.error( "Encountered an error when attempting to read {}.".format( @@ -70,7 +78,7 @@ def validate_image_dimensions( @staticmethod def validate_video_info( - video_file_path: str, + video_file_path: os.PathLike, *, expected_width: int, expected_height: int, @@ -87,10 +95,12 @@ def validate_video_info( discoverer = GstPbutils.Discoverer() try: - info = discoverer.discover_uri("file://{}".format(video_file_path)) + info = discoverer.discover_uri( + "file://{}".format(os.fspath(video_file_path)) + ) except (GLib.GError, GLib.Error) as e: logger.error( - "Encountered an error when attempting to read {}.".format( + "Encountered an error when attempting to read {}. ".format( video_file_path ) + str(e) @@ -191,13 +201,6 @@ def parse_args(): "before taking the photo. Default = {}.".format(default_wait_seconds), default=default_wait_seconds, ) - photo_subparser.add_argument( - "-p", - "--path", - type=str, - help="Where to save the file. This should be a directory.", - required=True, - ) photo_subparser.add_argument( "--skip-validation", action="store_true", @@ -226,13 +229,6 @@ def parse_args(): ), default=default_record_seconds, ) - video_subparser.add_argument( - "-p", - "--path", - type=str, - help="Where to save the file. This should be a directory.", - required=True, - ) default_tolerance = 0.5 video_subparser.add_argument( "-t", @@ -296,6 +292,15 @@ def parse_args(): ), default=default_viewfinder_seconds, ) + for file_needed_parser in (video_subparser, photo_subparser): + file_needed_parser.add_argument( + "-p", + "--path", + type=str, + help="Where to save output files. This should be a directory. 
" + "If not specified, a directory will be created in /tmp " + 'with the prefix "camera_test_auto_gst_"', + ) return parser.parse_args() @@ -332,16 +337,19 @@ def main(): ), ) - for dev_i, device in enumerate(devices): - dev_element = device.create_element() # type: Gst.Element - - if args.subcommand == "show-viewfinder": - cam.show_viewfinder(dev_element, show_n_seconds=args.seconds) - continue - - abs_path = os.path.abspath( - os.path.expanduser(os.path.expandvars(args.path)) - ) + with ( + TemporaryDirectory(prefix="camera_test_auto_gst_") + if not (hasattr(args, "path") and args.path) + else nullcontext() + ) as tmp_dir: + if tmp_dir: + abs_path = Path(tmp_dir) + else: + abs_path = Path( + os.path.abspath( + os.path.expanduser(os.path.expandvars(args.path)) + ) + ) if not os.path.isdir(abs_path): # must validate early # multifilesink does not check if the path exists @@ -349,91 +357,99 @@ def main(): 'Path "{}" does not exist'.format(abs_path) ) - resolver = cam.CapsResolver() - all_fixed_caps = resolver.get_all_fixated_caps( - device.get_caps(), "known_values" - ) + for dev_i, device in enumerate(devices): + dev_element = device.create_element() # type: Gst.Element - logger.info("Testing device {}/{}".format(dev_i + 1, len(devices))) - logger.info( # just an estimate - "Test for this device may take {} seconds for {} caps.".format( - len(all_fixed_caps) * max(args.seconds, 1), len(all_fixed_caps) + if args.subcommand == "show-viewfinder": + cam.show_viewfinder(dev_element, show_n_seconds=args.seconds) + continue + + resolver = cam.CapsResolver() + all_fixed_caps = resolver.get_all_fixated_caps( + device.get_caps(), "known_values" ) - ) - for cap_i, capability in enumerate(all_fixed_caps): - # since we use the same element for all caps - # previous parent pipelines are not auto removed - # need to explicitly unref - dev_element.unparent() - cap_struct = capability.get_structure(0) - if args.subcommand == "take-photo": - logger.info( - "Taking a photo with capability: " - + '"{}"'.format(capability.to_string()) - + "for device: " - + '"{}"'.format(device.get_display_name()), - ) - file_path = "{}/photo_dev_{}_cap_{}.jpeg".format( - abs_path, dev_i, cap_i - ) - cam.take_photo( - dev_element, - delay_seconds=args.seconds, - caps=capability, - file_path=file_path, + logger.info("Testing device {}/{}".format(dev_i + 1, len(devices))) + logger.info( # just an estimate + "Test for this device may take {} seconds for {} caps.".format( + len(all_fixed_caps) * max(args.seconds, 1), + len(all_fixed_caps), ) + ) - if args.skip_validation: - continue + for cap_i, capability in enumerate(all_fixed_caps): + # since we use the same element for all caps + # previous parent pipelines are not auto removed + # need to explicitly unref + dev_element.unparent() + cap_struct = capability.get_structure(0) + if args.subcommand == "take-photo": + logger.info( + "Taking a photo with capability: " + + '"{}"'.format(capability.to_string()) + + "for device: " + + '"{}"'.format(device.get_display_name()), + ) + file_path = abs_path / "photo_dev_{}_cap_{}.jpeg".format( + dev_i, cap_i + ) + cam.take_photo( + dev_element, + delay_seconds=args.seconds, + caps=capability, + file_path=file_path, + ) - MediaValidator.validate_image_dimensions( - file_path, - expected_width=cap_struct.get_int("width").value, - expected_height=cap_struct.get_int("height").value, - ) - elif args.subcommand == "record-video": - if args.encoding is not None: - encoding_profile = ENCODING_PROFILES[args.encoding][ - "profile_str" - ] - 
file_extension = ENCODING_PROFILES[args.encoding][ - "file_extension" - ] - if args.file_extension is not None: - file_extension = args.file_extension - else: - encoding_profile = args.custom_encoding_string - assert args.file_extension, ( - "File extension must be specified " - "when using custom encoding string" + if args.skip_validation: + continue + + MediaValidator.validate_image_dimensions( + file_path, + expected_width=cap_struct.get_int("width").value, + expected_height=cap_struct.get_int("height").value, ) - file_extension = args.file_extension + elif args.subcommand == "record-video": + if args.encoding is not None: + encoding_profile = ENCODING_PROFILES[args.encoding][ + "profile_str" + ] + file_extension = ENCODING_PROFILES[args.encoding][ + "file_extension" + ] + if args.file_extension is not None: + file_extension = args.file_extension + else: + encoding_profile = args.custom_encoding_string + assert args.file_extension, ( + "File extension must be specified " + "when using custom encoding string" + ) + file_extension = args.file_extension - file_path = "{}/video_dev_{}_cap_{}.{}".format( - abs_path, dev_i, cap_i, file_extension - ) - cam.record_video( - dev_element, - file_path=file_path, - caps=capability, - record_n_seconds=args.seconds, - encoding_profile=encoding_profile, - ) + file_path = abs_path / "video_dev_{}_cap_{}.{}".format( + dev_i, cap_i, file_extension + ) + cam.record_video( + dev_element, + file_path=file_path, + caps=capability, + record_n_seconds=args.seconds, + encoding_profile=encoding_profile, + ) - if args.skip_validation: - continue - - MediaValidator.validate_video_info( - file_path, - expected_duration_seconds=args.seconds, - expected_width=cap_struct.get_int("width").value, - expected_height=cap_struct.get_int("height").value, - duration_tolerance_seconds=args.tolerance, - expected_fps=cap_struct.get_fraction( - "framerate" - ).value_numerator, - ) + if args.skip_validation: + continue + + MediaValidator.validate_video_info( + file_path, + expected_duration_seconds=args.seconds, + expected_width=cap_struct.get_int("width").value, + expected_height=cap_struct.get_int("height").value, + duration_tolerance_seconds=args.tolerance, + expected_fps=cap_struct.get_fraction( + "framerate" + ).value_numerator, + ) logger.info("[ OK ] All done!") From 22cdc6710912ef3418a8e91bf02ef1ada3d43a26 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:21:55 +0800 Subject: [PATCH 74/79] test: new test fixture --- .../tests/test_camera_test_auto_gst_source.py | 88 +++++++++++++++++-- 1 file changed, 79 insertions(+), 9 deletions(-) diff --git a/providers/base/tests/test_camera_test_auto_gst_source.py b/providers/base/tests/test_camera_test_auto_gst_source.py index 13baa05c49..d4e32b94a9 100644 --- a/providers/base/tests/test_camera_test_auto_gst_source.py +++ b/providers/base/tests/test_camera_test_auto_gst_source.py @@ -2,6 +2,8 @@ from unittest.mock import MagicMock, patch from shlex import split as sh_split import sys +from io import StringIO +from pathlib import Path mock_gi = MagicMock() @@ -16,6 +18,7 @@ }, ) class CameraTestAutoGstSourceTests(ut.TestCase): + @patch("sys.stdout", new=StringIO()) def test_correct_subcommand_is_executed(self): with patch("os.path.isdir") as mock_isdir, patch( @@ -66,12 +69,12 @@ def test_correct_subcommand_is_executed(self): self.assertEqual(mock_cam.show_viewfinder.call_count, 1) @patch("os.path.isfile") - # @patch("gi.repository.GstPbutils.Discoverer") 
@patch("camera_test_auto_gst_source.logger") - @patch("camera_test_auto_gst_source.Image") + # @patch("camera_test_auto_gst_source.PIL.Image") + @patch("camera_test_auto_gst_source.GstPbutils") def test_image_validator( self, - mock_pil: MagicMock, + mock_pbutils: MagicMock, mock_logger: MagicMock, mock_isfile: MagicMock, ): @@ -81,26 +84,26 @@ def test_image_validator( expected_height = 480 mock_isfile.return_value = True - mock_pil.open().width = expected_width - mock_pil.open().height = expected_height + self._make_mock_video_info( + mock_pbutils, expected_width, expected_height + ) validator = CTAGS.MediaValidator() self.assertTrue( validator.validate_image_dimensions( - "some/path", + Path("some/path"), expected_height=expected_height, expected_width=expected_width, ) ) bad_width = 1237219831 - mock_pil.open().width = bad_width - mock_pil.open().height = expected_height + self._make_mock_video_info(mock_pbutils, bad_width, expected_height) self.assertFalse( validator.validate_image_dimensions( - "some/path", + Path("some/path"), expected_height=expected_height, expected_width=expected_width, ) @@ -112,6 +115,73 @@ def test_image_validator( ) ) + @patch("camera_test_auto_gst_source.Gst") + @patch("os.path.isfile") + @patch("camera_test_auto_gst_source.GstPbutils") + @patch("camera_test_auto_gst_source.logger") + def test_video_validator( + self, + mock_logger: MagicMock, + mock_pbutils: MagicMock, + mock_isfile: MagicMock, + mock_gst: MagicMock, + ): + import camera_test_auto_gst_source as CTAGS + + mock_gst.SECOND = 1 + + expected_width = 640 + expected_height = 480 + expected_fps = 30 + expected_duration = 5 + + self._make_mock_video_info( + mock_pbutils, + expected_width, + expected_height, + expected_fps, + expected_duration, + ) + mock_isfile.return_value = True + validator = CTAGS.MediaValidator() + result = validator.validate_video_info( + Path("some/path"), + expected_width=expected_width, + expected_height=expected_height, + expected_fps=expected_fps, + expected_duration_seconds=expected_duration, + duration_tolerance_seconds=0.5, + ) + self.assertTrue(result) + + def _make_mock_video_info( + self, + mock_pbutils: MagicMock, + width, + height, + fps=None, + duration=None, + ): + # mock_discoverer = MagicMock() + mock_pbutils.reset_mock() + video_info = MagicMock() + mock_pbutils.name = "mymockpbutils" + if duration: + video_info.get_duration.return_value = duration + mock_pbutils.Discoverer.return_value = MagicMock() + + mock_pbutils.Discoverer().discover_uri.return_value = video_info + video_stream = MagicMock() + video_stream.get_width.return_value = width + video_stream.get_height.return_value = height + + if fps: + video_stream.get_framerate_num.return_value = fps + + video_info.get_video_streams.return_value = [video_stream] + + return mock_pbutils, video_info + if __name__ == "__main__": ut.main() From 1cd3bcd2edd766d5bedfefff502a5f273a7c35bd Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:02:02 +0800 Subject: [PATCH 75/79] test: more tests --- .../base/bin/camera_test_auto_gst_source.py | 6 +- .../tests/test_camera_test_auto_gst_source.py | 229 +++++++++++++++++- 2 files changed, 219 insertions(+), 16 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index adddaa2fee..866c0e3512 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -305,7 +305,7 @@ def 
parse_args(): return parser.parse_args() -def main(): +def main() -> int: args = parse_args() if os.getuid() == 0: logger.warning( @@ -322,7 +322,7 @@ def main(): "Gst.DeviceProvider to make itself visible to GStreamer " "or it is inaccessible without sudo." ) - exit(1) + return 1 logger.info("Found {} cameras!".format(len(devices))) print( @@ -452,7 +452,7 @@ def main(): ) logger.info("[ OK ] All done!") - + return 0 if __name__ == "__main__": Gst.init(None) diff --git a/providers/base/tests/test_camera_test_auto_gst_source.py b/providers/base/tests/test_camera_test_auto_gst_source.py index d4e32b94a9..68bfd137d9 100644 --- a/providers/base/tests/test_camera_test_auto_gst_source.py +++ b/providers/base/tests/test_camera_test_auto_gst_source.py @@ -1,5 +1,6 @@ +from os import fspath import unittest as ut -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, call, patch from shlex import split as sh_split import sys from io import StringIO @@ -20,7 +21,6 @@ class CameraTestAutoGstSourceTests(ut.TestCase): @patch("sys.stdout", new=StringIO()) def test_correct_subcommand_is_executed(self): - with patch("os.path.isdir") as mock_isdir, patch( "os.path.isfile" ) as mock_isfile, patch( @@ -37,7 +37,6 @@ def test_correct_subcommand_is_executed(self): mock_get_devices.return_value = [MagicMock()] mock_validator.validate_image_dimensions.return_value = True - # print(dir(mock_cam.take_photo)) mock_resolver = MagicMock() mock_cam.CapsResolver.return_value = mock_resolver mock_resolver.get_all_fixated_caps.return_value = [MagicMock()] @@ -70,7 +69,6 @@ def test_correct_subcommand_is_executed(self): @patch("os.path.isfile") @patch("camera_test_auto_gst_source.logger") - # @patch("camera_test_auto_gst_source.PIL.Image") @patch("camera_test_auto_gst_source.GstPbutils") def test_image_validator( self, @@ -99,7 +97,8 @@ def test_image_validator( ) bad_width = 1237219831 - self._make_mock_video_info(mock_pbutils, bad_width, expected_height) + bad_height = 1133222 + self._make_mock_video_info(mock_pbutils, bad_width, bad_height) self.assertFalse( validator.validate_image_dimensions( @@ -109,10 +108,19 @@ def test_image_validator( ) ) - mock_logger.error.assert_called_with( - "Image width mismatch. Expected = {}, actual = {}".format( - expected_width, bad_width - ) + mock_logger.error.assert_has_calls( + [ + call( + "Image width mismatch. Expected = {}, actual = {}".format( + expected_width, bad_width + ) + ), + call( + "Image height mismatch. Expected = {}, actual = {}".format( + expected_height, bad_height + ) + ), + ] ) @patch("camera_test_auto_gst_source.Gst") @@ -154,6 +162,203 @@ def test_video_validator( ) self.assertTrue(result) + bad_width = 1237219831 + bad_height = 113322 + bad_fps = 123 + bad_duration = 1 + + self._make_mock_video_info( + mock_pbutils, + bad_width, + bad_height, + bad_fps, + bad_duration, + ) + + mock_gst.SECOND = 1 + result = validator.validate_video_info( + Path("some/path"), + expected_width=expected_width, + expected_height=expected_height, + expected_fps=expected_fps, + expected_duration_seconds=expected_duration, + duration_tolerance_seconds=0.5, + ) + self.assertFalse(result) + + mock_logger.error.assert_has_calls( + [ + call( + "Duration not within tolerance. " + "Got {}s, but expected {} +- {}s".format( + round(bad_duration / mock_gst.SECOND, 3), + expected_duration, + 0.5, + ) + ), + call( + "Video width mismatch. Expected = {}, actual = {}".format( + expected_width, bad_width + ) + ), + call( + "Video height mismatch. 
Expected = {}, actual = {}".format( + expected_height, bad_height + ) + ), + call( + "Video FPS mismatch. Expected = {}fps, actual = {}fps".format( + expected_fps, bad_fps + ) + ), + ] + ) + + mock_isfile.return_value = False + + result = validator.validate_video_info( + Path("some/path"), + expected_width=expected_width, + expected_height=expected_height, + expected_fps=expected_fps, + expected_duration_seconds=expected_duration, + duration_tolerance_seconds=0.5, + ) + mock_logger.error.assert_called_with( + "Video file doesn't exist at some/path" + ) + + @patch( + "sys.argv", + sh_split("camera_test_auto_gst_source.py take-photo -p some/dir"), + ) + @patch("camera_test_auto_gst_source.get_devices") + @patch("camera_test_auto_gst_source.logger") + def test_exit_if_no_cameras( + self, + mock_logger: MagicMock, + mock_get_devices: MagicMock, + ): + mock_get_devices.return_value = [] + import camera_test_auto_gst_source as CTAGS + + self.assertEqual(CTAGS.main(), 1) + mock_logger.error.assert_called_with( + "GStreamer cannot find any cameras on this device. " + "If you know a camera element exists, then it did not implement " + "Gst.DeviceProvider to make itself visible to GStreamer " + "or it is inaccessible without sudo." + ) + + @patch("camera_test_auto_gst_source.get_devices") + @patch("camera_test_auto_gst_source.cam") + def test_encoding_arg_group( + self, mock_cam: MagicMock, mock_get_devices: MagicMock + ): + import camera_test_auto_gst_source as CTAGS + + mock_resolver = MagicMock() + mock_cam.CapsResolver.return_value = mock_resolver + mock_resolver.get_all_fixated_caps.return_value = [MagicMock()] + mock_get_devices.return_value = [MagicMock()] + + with patch( + "sys.argv", + sh_split( + "camera_test_auto_gst_source.py record-video " + "--encoding mp4_h264 --skip-validation" + ), + ): + CTAGS.main() + last_called_args = mock_cam.record_video.call_args[-1] + self.assertEqual( + last_called_args["encoding_profile"], + CTAGS.ENCODING_PROFILES["mp4_h264"]["profile_str"], + ) + self.assertIn( + CTAGS.ENCODING_PROFILES["mp4_h264"]["file_extension"], + fspath(last_called_args["file_path"]), + ) + + file_ext = "ext" + with patch( + "sys.argv", + sh_split( + "camera_test_auto_gst_source.py record-video " + "--encoding mp4_h264 --file-extension {}".format(file_ext) + ), + ): + CTAGS.main() + last_called_args = mock_cam.record_video.call_args[-1] + self.assertEqual( + last_called_args["encoding_profile"], + CTAGS.ENCODING_PROFILES["mp4_h264"]["profile_str"], + ) + self.assertIn( + file_ext, + fspath(last_called_args["file_path"]), + ) + + encoding_str = "video/something, str" + with patch( + "sys.argv", + sh_split( + "camera_test_auto_gst_source.py record-video " + + '--custom-encoding-string "{}" '.format(encoding_str) + + "--file-extension {}".format(file_ext) + ), + ): + CTAGS.main() + last_called_args = mock_cam.record_video.call_args[-1] + self.assertEqual( + last_called_args["encoding_profile"], + encoding_str, + ) + self.assertIn( + file_ext, + fspath(last_called_args["file_path"]), + ) + + with patch( + "sys.argv", + sh_split( + "camera_test_auto_gst_source.py record-video " + + '--custom-encoding-string "{}" '.format(encoding_str) + ), + ): + self.assertRaises(AssertionError, CTAGS.main) + + @patch( + "sys.argv", + sh_split("camera_test_auto_gst_source.py take-photo -p some/dir"), + ) + @patch("os.path.isfile") + @patch("camera_test_auto_gst_source.GstPbutils") + @patch("camera_test_auto_gst_source.GLib") + def test_handle_glib_errors( + self, + mock_glib: MagicMock, + mock_pbutils: 
MagicMock, + mock_isfile: MagicMock, + ): + mock_glib.GError = Exception + mock_isfile.return_value = True + mock_discoverer = MagicMock() + mock_discoverer.name = "bruh" + mock_pbutils.Discoverer.return_value = mock_discoverer + mock_discoverer.discover_uri.side_effect = Exception() + + import camera_test_auto_gst_source as CTAGS + + self.assertRaises( + Exception, + lambda: CTAGS.MediaValidator.validate_image_dimensions( + Path("some/path"), + expected_height=1, + expected_width=1, + ), + ) + def _make_mock_video_info( self, mock_pbutils: MagicMock, @@ -162,11 +367,9 @@ def _make_mock_video_info( fps=None, duration=None, ): - # mock_discoverer = MagicMock() mock_pbutils.reset_mock() video_info = MagicMock() - mock_pbutils.name = "mymockpbutils" - if duration: + if duration is not None: video_info.get_duration.return_value = duration mock_pbutils.Discoverer.return_value = MagicMock() @@ -175,7 +378,7 @@ def _make_mock_video_info( video_stream.get_width.return_value = width video_stream.get_height.return_value = height - if fps: + if fps is not None: video_stream.get_framerate_num.return_value = fps video_info.get_video_streams.return_value = [video_stream] From 7e3216404d3fc87ad76c03374675955009f9ac58 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:13:12 +0800 Subject: [PATCH 76/79] test: increase coverage --- .../base/bin/camera_test_auto_gst_source.py | 2 +- .../tests/test_camera_test_auto_gst_source.py | 34 ++++++++++++++++--- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 866c0e3512..f1526a83a1 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -47,7 +47,7 @@ def validate_image_dimensions( ) except (GLib.GError, GLib.Error) as e: logger.error( - "Encountered an error when attempting to read {}.".format( + "Encountered an error when attempting to read {}. ".format( image_file_path ) + str(e) # cleaner message is in e.message diff --git a/providers/base/tests/test_camera_test_auto_gst_source.py b/providers/base/tests/test_camera_test_auto_gst_source.py index 68bfd137d9..2aecd14764 100644 --- a/providers/base/tests/test_camera_test_auto_gst_source.py +++ b/providers/base/tests/test_camera_test_auto_gst_source.py @@ -335,30 +335,54 @@ def test_encoding_arg_group( @patch("os.path.isfile") @patch("camera_test_auto_gst_source.GstPbutils") @patch("camera_test_auto_gst_source.GLib") + @patch("camera_test_auto_gst_source.logger") def test_handle_glib_errors( self, + mock_logger: MagicMock, mock_glib: MagicMock, mock_pbutils: MagicMock, mock_isfile: MagicMock, ): - mock_glib.GError = Exception + class GError(BaseException): + pass + + mock_glib.GError = GError + mock_glib.Error = GError mock_isfile.return_value = True mock_discoverer = MagicMock() mock_discoverer.name = "bruh" mock_pbutils.Discoverer.return_value = mock_discoverer - mock_discoverer.discover_uri.side_effect = Exception() + mock_discoverer.discover_uri.side_effect = GError("some message") import camera_test_auto_gst_source as CTAGS - self.assertRaises( - Exception, - lambda: CTAGS.MediaValidator.validate_image_dimensions( + self.assertFalse( + CTAGS.MediaValidator.validate_image_dimensions( Path("some/path"), expected_height=1, expected_width=1, ), ) + mock_logger.error.assert_called_with( + "Encountered an error when attempting to read some/path. 
some message" + ) + + self.assertFalse( + CTAGS.MediaValidator.validate_video_info( + Path("some/path"), + expected_height=1, + expected_width=1, + duration_tolerance_seconds=1, + expected_duration_seconds=1, + expected_fps=1, + ), + ) + + mock_logger.error.assert_called_with( + "Encountered an error when attempting to read some/path. some message" + ) + def _make_mock_video_info( self, mock_pbutils: MagicMock, From 88a9ba9d95a4e3a89062d22b6cb89e3b18e4a983 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:28:58 +0800 Subject: [PATCH 77/79] fix: 3.5 doesn't have pathlike --- checkbox-support/checkbox_support/camera_pipelines.py | 10 +++++----- providers/base/bin/camera_test_auto_gst_source.py | 8 +++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/checkbox-support/checkbox_support/camera_pipelines.py b/checkbox-support/checkbox_support/camera_pipelines.py index 578bd68c0a..7403514446 100644 --- a/checkbox-support/checkbox_support/camera_pipelines.py +++ b/checkbox-support/checkbox_support/camera_pipelines.py @@ -2,7 +2,7 @@ import gi import typing as T import logging -from os import PathLike, fspath +from pathlib import Path logger = logging.getLogger(__name__) logging.basicConfig( @@ -437,7 +437,7 @@ def take_photo( source: Gst.Element, *, caps: T.Optional[Gst.Caps] = None, - file_path: PathLike, + file_path: Path, delay_seconds: int ): """Take a photo using the source element @@ -459,7 +459,7 @@ def take_photo( "valve name=photo-valve drop=True", # 3 "jpegenc", # 4 "multifilesink post-messages=True location={}".format( - fspath(file_path) + str(file_path) ), # 5 ] head_elem_name = "source-caps" @@ -549,7 +549,7 @@ def record_video( source: Gst.Element, *, caps: T.Optional[Gst.Caps] = None, - file_path: PathLike, + file_path: Path, record_n_seconds: int, encoding_profile: str ): @@ -563,7 +563,7 @@ def record_video( "decodebin", # 1 "videoconvert name=converter", # 2 "encodebin profile={}".format(encoding_profile), # 3 - "filesink location={}".format(fspath(file_path)), # 4 + "filesink location={}".format(str(file_path)), # 4 ] head_elem_name = "source-caps" diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index f1526a83a1..8bd382b39e 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -29,10 +29,7 @@ class MediaValidator: @staticmethod def validate_image_dimensions( - image_file_path: os.PathLike, - *, - expected_width: int, - expected_height: int + image_file_path: Path, *, expected_width: int, expected_height: int ) -> bool: if not os.path.isfile(image_file_path): logger.error( @@ -78,7 +75,7 @@ def validate_image_dimensions( @staticmethod def validate_video_info( - video_file_path: os.PathLike, + video_file_path: Path, *, expected_width: int, expected_height: int, @@ -454,6 +451,7 @@ def main() -> int: logger.info("[ OK ] All done!") return 0 + if __name__ == "__main__": Gst.init(None) GstPbutils.pb_utils_init() From ed5908013b7cdc255a3b9acf7f133b0d8d4df63b Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:35:42 +0800 Subject: [PATCH 78/79] fix: removed broken tests --- .../tests/test_camera_pipelines.py | 101 ------------------ 1 file changed, 101 deletions(-) delete mode 100644 checkbox-support/checkbox_support/tests/test_camera_pipelines.py diff --git 
a/checkbox-support/checkbox_support/tests/test_camera_pipelines.py b/checkbox-support/checkbox_support/tests/test_camera_pipelines.py deleted file mode 100644 index f80b356dae..0000000000 --- a/checkbox-support/checkbox_support/tests/test_camera_pipelines.py +++ /dev/null @@ -1,101 +0,0 @@ -import itertools -from unittest.mock import MagicMock, patch - -import unittest as ut -import sys -import gi -from checkbox_support.camera_pipelines import CapsResolver - -gi.require_version("Gst", "1.0") -from gi.repository import Gst - - -class CapsResolverTests(ut.TestCase): - - resolver = None - - @classmethod - def setUpClass(cls): - Gst.init([]) - cls.resolver = CapsResolver() - - def test_fraction_list(self): - out = self.resolver.get_all_fixated_caps( - Gst.Caps.from_string( - "image/jpeg, width=1280, height=720, framerate={ (fraction)30/1, (fraction)15/1 }" - ), - "remap", - ) - - self.assertEqual( - [c.to_string() for c in out], - [ - "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)30/1", - "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)15/1", - ], - ) - - def test_fraction_range(self): - out = self.resolver.get_all_fixated_caps( - Gst.Caps.from_string( - "image/jpeg, width=1280, height=720, framerate=[ (fraction)1/1, (fraction)100/1 ]" - ), - "remap", - ) - - self.assertCountEqual( # quality without order - [cap.to_string() for cap in out], - [ - "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)15/1", - "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)30/1", - "image/jpeg, width=(int)1280, height=(int)720, framerate=(fraction)60/1", - ], - ) - - def test_int_range(self): - out = self.resolver.get_all_fixated_caps( - Gst.Caps.from_string( - "image/jpeg, width=[1, 1280], height=[1, 720], framerate=[ (fraction)1/1, (fraction)100/1 ]" - ), - "remap", - ) - answer = [ - "image/jpeg, width=(int){}, height=(int){}, framerate=(fraction){}".format( - width, height, framerate - ) - for width, height, framerate in itertools.product( - (640, 1280), (480, 720), ("15/1", "30/1", "60/1") - ) - ] - self.assertCountEqual( # quality without order - [cap.to_string() for cap in out], - answer, - ) - - def test_all_lists(self): - widths = ["20", "30", "40"] - heights = ["10", "720"] - framerates = ["15/1", "30/1", "60/1"] - answer = [ - "image/jpeg, width=(int){}, height=(int){}, framerate=(fraction){}".format( - width, height, framerate - ) - for width, height, framerate in itertools.product( - widths, heights, framerates - ) - ] - out = self.resolver.get_all_fixated_caps( - Gst.Caps.from_string( - "image/jpeg, width={{{}}}, height={{{}}}, framerate={{{}}}".format( - ", ".join(widths), - ", ".join(heights), - ", ".join(framerates), - ) - ), - "remap", - ) - self.assertCountEqual([cap.to_string() for cap in out], answer) - - -if __name__ == "__main__": - ut.main() From 92e954d10f7c0bc11b830c989ce68737f5604ed9 Mon Sep 17 00:00:00 2001 From: Zhongning Li <60045212+tomli380576@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:23:00 +0800 Subject: [PATCH 79/79] fix: nullcontext doesn't exist in 3.5 --- providers/base/bin/camera_test_auto_gst_source.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/providers/base/bin/camera_test_auto_gst_source.py b/providers/base/bin/camera_test_auto_gst_source.py index 8bd382b39e..16f84f14b2 100755 --- a/providers/base/bin/camera_test_auto_gst_source.py +++ b/providers/base/bin/camera_test_auto_gst_source.py @@ -7,7 +7,7 @@ import typing as T import logging from 
checkbox_support import camera_pipelines as cam -from contextlib import nullcontext +from contextlib import ExitStack from pathlib import Path logger = logging.getLogger(__name__) @@ -334,12 +334,12 @@ def main() -> int: ), ) - with ( - TemporaryDirectory(prefix="camera_test_auto_gst_") - if not (hasattr(args, "path") and args.path) - else nullcontext() - ) as tmp_dir: - if tmp_dir: + with ExitStack() as stack: + + if not (hasattr(args, "path") and args.path): + tmp_dir = stack.enter_context( + TemporaryDirectory(prefix="camera_test_auto_gst_") + ) abs_path = Path(tmp_dir) else: abs_path = Path(