diff --git a/requirements.txt b/requirements.txt index 3d09903..9146102 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -glassesTools[GUI]==1.10.6 # pulls in various other dependencies such as imgui_bundle, matplotlib, numpy, opencv, pandas, polars +glassesTools[GUI]==1.11.0 # pulls in various other dependencies such as imgui_bundle, matplotlib, numpy, opencv, pandas, polars I2MC aiosqlite uvloop ; sys_platform != "win32" diff --git a/src/glassesValidator/process/b_codeMarkerInterval.py b/src/glassesValidator/process/b_codeMarkerInterval.py index 6fe45c0..70c2cbd 100644 --- a/src/glassesValidator/process/b_codeMarkerInterval.py +++ b/src/glassesValidator/process/b_codeMarkerInterval.py @@ -12,7 +12,7 @@ if isMacOS: import AppKit -from glassesTools import annotation, gaze_headref, gaze_worldref, ocv, plane, recording, timestamps +from glassesTools import annotation, gaze_headref, gaze_worldref, naming, ocv, plane, recording, timestamps from glassesTools.gui import video_player from .. import config @@ -58,7 +58,7 @@ def do_the_work(working_dir, config_dir, gui: video_player.GUI, main_win_id, sho poster = config.poster.Poster(config_dir, validationSetup) # Read gaze data - gazes = gaze_headref.read_dict_from_file(working_dir / 'gazeData.tsv')[0] + gazes = gaze_headref.read_dict_from_file(working_dir / naming.gaze_data_fname)[0] # Read pose of poster, if available hasPosterPose = False @@ -81,7 +81,7 @@ def do_the_work(working_dir, config_dir, gui: video_player.GUI, main_win_id, sho pass # get camera calibration info - cameraParams= ocv.CameraParams.read_from_file(working_dir / "calibration.xml") + cameraParams= ocv.CameraParams.read_from_file(working_dir / naming.scene_camera_calibration_fname) hasCamCal = cameraParams.has_intrinsics() # get interval coded to be analyzed, if available @@ -100,7 +100,7 @@ def do_the_work(working_dir, config_dir, gui: video_player.GUI, main_win_id, sho if show_poster: poster_win_id = gui.add_window("poster") # 3. 
timestamp info for relating audio to video frames - video_ts = timestamps.VideoTimestamps( working_dir / 'frameTimestamps.tsv' ) + video_ts = timestamps.VideoTimestamps( working_dir / naming.frame_timestamps_fname ) # 4. mediaplayer for the actual video playback, with sound if available inVideo = recInfo.get_scene_video_path() ff_opts = {'volume': 1., 'sync': 'audio', 'framedrop': True} diff --git a/src/glassesValidator/process/c_detectMarkers.py b/src/glassesValidator/process/c_detectMarkers.py index 3a595cd..857177f 100644 --- a/src/glassesValidator/process/c_detectMarkers.py +++ b/src/glassesValidator/process/c_detectMarkers.py @@ -1,7 +1,7 @@ import pathlib import threading -from glassesTools import annotation, aruco, plane, recording +from glassesTools import annotation, aruco, naming, plane, recording from glassesTools.gui import video_player from .. import config @@ -51,7 +51,7 @@ def do_the_work(working_dir, config_dir, gui, show_rejected_markers): in_video = recInfo.get_scene_video_path() # set up pose estimator and run it - estimator = aruco.PoseEstimator(in_video, working_dir / "frameTimestamps.tsv", working_dir / "calibration.xml") + estimator = aruco.PoseEstimator(in_video, working_dir / naming.frame_timestamps_fname, working_dir / naming.scene_camera_calibration_fname) estimator.add_plane('validate', {'plane': poster, 'aruco_params': {'markerBorderBits': validationSetup['markerBorderBits']}, 'min_num_markers': validationSetup['minNumMarkers']}, analyzeFrames) diff --git a/src/glassesValidator/process/d_gazeToPoster.py b/src/glassesValidator/process/d_gazeToPoster.py index ab0764c..6098f0b 100644 --- a/src/glassesValidator/process/d_gazeToPoster.py +++ b/src/glassesValidator/process/d_gazeToPoster.py @@ -1,7 +1,7 @@ import pathlib import threading -from glassesTools import annotation, gaze_headref, gaze_worldref, ocv, plane, recording +from glassesTools import annotation, gaze_headref, gaze_worldref, naming, ocv, plane, recording from glassesTools.gui 
import video_player, worldgaze from .. import config @@ -38,13 +38,13 @@ def do_the_work(working_dir, config_dir, gui, frame_win_id, show_poster, show_on utils.update_recording_status(working_dir, utils.Task.Gaze_Tranformed_To_Poster, utils.Status.Running) # get camera calibration info - cameraParams = ocv.CameraParams.read_from_file(working_dir / "calibration.xml") + cameraParams = ocv.CameraParams.read_from_file(working_dir / naming.scene_camera_calibration_fname) # get interval coded to be analyzed, if any analyzeFrames = utils.readMarkerIntervalsFile(working_dir / "markerInterval.tsv") # Read gaze data - head_gazes = gaze_headref.read_dict_from_file(working_dir / 'gazeData.tsv', episodes=analyzeFrames if not gui or show_only_intervals else None)[0] + head_gazes = gaze_headref.read_dict_from_file(working_dir / naming.gaze_data_fname, episodes=analyzeFrames if not gui or show_only_intervals else None)[0] # Read camera pose w.r.t. poster poses = plane.read_dict_from_file(working_dir / 'posterPose.tsv', episodes=analyzeFrames if not gui or show_only_intervals else None) @@ -66,7 +66,7 @@ def do_the_work(working_dir, config_dir, gui, frame_win_id, show_poster, show_on validationSetup = config.get_validation_setup(config_dir) poster = config.poster.Poster(config_dir, validationSetup) worldgaze.show_visualization( - in_video, working_dir / 'frameTimestamps.tsv', working_dir / "calibration.xml", + in_video, working_dir / naming.frame_timestamps_fname, working_dir / naming.scene_camera_calibration_fname, {'poster': poster}, {'poster': poses}, head_gazes, {'poster': plane_gazes}, {annotation.Event.Validate: analyzeFrames}, gui, frame_win_id, show_poster, show_only_intervals, 8 ) \ No newline at end of file diff --git a/src/glassesValidator/utils/makeVideo.py b/src/glassesValidator/utils/makeVideo.py index 10d7481..2e4d432 100644 --- a/src/glassesValidator/utils/makeVideo.py +++ b/src/glassesValidator/utils/makeVideo.py @@ -6,7 +6,7 @@ import numpy as np import threading 
-from glassesTools import annotation, aruco, gaze_headref, gaze_worldref, ocv, recording, timestamps, transforms +from glassesTools import annotation, aruco, gaze_headref, gaze_worldref, naming, ocv, recording, timestamps, transforms from glassesTools.gui import video_player from .. import config @@ -67,14 +67,14 @@ def do_the_work(working_dir, config_dir, gui: video_player.GUI, main_win_id, sho episodes = {annotation.Event.Validate: analyzeFrames} # Read gaze data - gazes_head = gaze_headref.read_dict_from_file(working_dir / 'gazeData.tsv')[0] + gazes_head = gaze_headref.read_dict_from_file(working_dir / naming.gaze_data_fname)[0] # get camera calibration info - cameraParams = ocv.CameraParams.read_from_file(working_dir / "calibration.xml") + cameraParams = ocv.CameraParams.read_from_file(working_dir / naming.scene_camera_calibration_fname) # build pose estimator in_video = recInfo.get_scene_video_path() # get video file to process - video_ts = timestamps.VideoTimestamps(working_dir / "frameTimestamps.tsv") + video_ts = timestamps.VideoTimestamps(working_dir / naming.frame_timestamps_fname) pose_estimator = aruco.PoseEstimator(in_video, video_ts, cameraParams) pose_estimator.add_plane('validate', {'plane': poster, 'aruco_params': {'markerBorderBits': validationSetup['markerBorderBits']}, 'min_num_markers': validationSetup['minNumMarkers']})