diff --git a/Ethosight/README.md b/Ethosight/README.md
index f056dca..669fd08 100644
--- a/Ethosight/README.md
+++ b/Ethosight/README.md
@@ -103,7 +103,7 @@ If no need to create a super user and migrations are done, you can simply run th
 python website/manage.py runserver 8080
 ```
 You can view your application by navigating to `http://localhost:8080` in your web browser.
-You can create your own ethosight configuration by accessing `http://localhost:8080/admin`.
+You can create your own Ethosight [configuration](#ethosight-configuration) by accessing `http://localhost:8080/admin`.
 You can find an example in `./configs` folder.
 
 ## Registering new users.
@@ -112,4 +112,66 @@ and request an access code in the registration form. The admin users should appr
 If there's no capability to handle mail sending, you can generate access codes manually using `genaccesscodes.py`,
 and use that access code in the form without requesting.
 
-
+## Ethosight Configuration
+
+The `ethosight.yml` file is the setup configuration for the Ethosight application.
+You can find an example file in the `./configs` folder with all possible configuration options and their explanations.
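+
+A minimal sketch of what an `ethosight.yml` might contain is shown below. The keys mirror the example
+`configs/ethosightapp.yaml`; treat the values as illustrative placeholders rather than recommended settings.
+
+```yaml
+analyzeImageMethod: analyzeImage_precompiled   # analysis method from EthosightMediaAnalyzer
+embeddings_path: baseline.embeddings           # embeddings file in the embeddings folder
+labels_path: baseline.labels                   # labels file in the embeddings folder
+output_type: affinityScores                    # output result format
+reasoner_type: ''                              # '' disables reasoning; e.g. 'chatgpt' enables the ChatGPT reasoner
+
+label_space_optimization:
+  enabled: true
+  method: "semantic_similarity"
+  parameters:
+    threshold: .8
+    max_labels: 10
+```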
+
+
+## CLI
+Besides the main application's web UI, Ethosight also provides CLIs for the core classes and functionality:
+EthosightAppCLI, EthosightCLI, EthosightDatasetCLI, and EthosightMediaAnalyzerCLI.
+
+The main one is EthosightAppCLI, which provides a number of useful methods (some are still under implementation); a usage sketch follows the list below.
+
+* create_app (app_dir, config_file) - creates a new application
+    * app_dir - the location where the application will be created and run, along with its config files, embeddings, and labels
+    * config_file - path to the `*.yml` config file
+* delete_app (app_dir) - deletes the application located in app_dir
+    * app_dir - the application directory
+* benchmark (app_dir) - computes accuracy on a directory of images
+    * app_dir - the application directory
+* optimize (app_dir) - optimizes the EthosightApp
+    * app_dir - the application directory
+* run (app_dir, image) - runs the EthosightApp on a single image
+    * app_dir - the application directory
+    * image - image file path
+* benchmark_video (app_dir, video_gt_csv_filename) - runs video benchmarking on a video
+    * app_dir - the application directory
+    * video_gt_csv_filename - CSV file listing video file names with their ground truths
+* rank_affinities (app_dir, json_file_path) - ranks affinities from JSON results
+    * app_dir - the application directory
+    * json_file_path - path to a JSON file containing already computed affinity scores
+* phase2videobenchmarks (app_dir, phase2_groundtruth_csv) - runs benchmarks on all of the affinity score JSON files listed in the CSV file; these are produced by phase 1 Ethosight processing of video datasets
+    * app_dir - the application directory
+    * phase2_groundtruth_csv - the CSV file path
+* add_labels (app_dir, labels) - adds new labels to the EthosightApp
+    * app_dir - the application directory
+    * labels - the new labels to add
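+
+The method names above come straight from EthosightAppCLI; the entry point and paths in the example
+below are assumptions, so adjust them to match your checkout. A hypothetical session might look like:
+
+```bash
+# assumed script location and invocation style -- verify against your installation
+python Ethosight/EthosightAppCLI.py create_app ./myapp ./configs/ethosightapp.yaml
+python Ethosight/EthosightAppCLI.py run ./myapp ./images/example.jpg
+```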
\ No newline at end of file
diff --git a/Ethosight/configs/ethosightapp.yaml b/Ethosight/configs/ethosightapp.yaml
index 573f5ab..d0d0ae1 100644
--- a/Ethosight/configs/ethosightapp.yaml
+++ b/Ethosight/configs/ethosightapp.yaml
@@ -1,41 +1,43 @@
 ################# basic app configuration
-analyzeImageMethod: analyzeImage_precompiled
-embeddings_path: baseline.embeddings
-labels_path: baseline.labels
-output_type: affinityScores
-reasoner_type: chatgpt
+analyzeImageMethod: analyzeImage_precompiled # Default method from EthosightMediaAnalyzer
+embeddings_path: baseline.embeddings # Embeddings file located in the embeddings folder
+labels_path: baseline.labels # Labels file located in the embeddings folder
+output_type: affinityScores # The output result format
+reasoner_type: '' # e.g. 'chatgpt' for the ChatGPT reasoner; leave as an empty string for no reasoning
 
 
 ################## configure the label space optimization
 label_space_optimization:
   enabled: true
-  rerun: false # if true, will rerun the optimization even if a results file exists,
+  rerun: true # if true, will rerun the optimization even if a results file exists,
   # otherwise will just load the most recent results file
-#  method: "semantic_relations"
-  method: "semantic_similarity"
+#  method options: "semantic_relations", "semantic_similarity"
+  method: "semantic_similarity" # Default: semantic_similarity
   parameters:
     threshold: .8
-    max_labels: 10 # this is the max per ground truth label (not total)
+    max_labels: 10 # this is the max per ground truth label (not total)
   general_templates:
-    enabled: false
+    enabled: true
     template_path: "../templates/general_templates.txt"
 
 
 ################## configure the benchmark
+# if benchmark is enabled, benchmarking is also performed during optimization
 benchmark:
   enabled: true
   batch_mode: true
   batchsize: 256
-  image_dir: "/home/hmlatapie/devactive/home/Ethosight/images/robbery"
+  # Directory of images on which to run benchmarks
+  image_dir: "/home/vahagn/projects/EthosightNew/benchmark/images/robbery"
   # normallabel used to compute benchmark anomaly metrics
   normallabel: "normal event"
   # ground_truth_path can be a .txt file or a .csv
   # .txt : labels only
   # .csv : filename, label pairs ... first row in file is assumed to be a header and is ignored
   # ground_truth_path: "../images/robbery/image-labels2.csv" #only one label per video
-  ground_truth_path: "/home/hmlatapie/devactive/home/Ethosight/images/robbery/image-labels.csv" #labeled correctly per frame
+  ground_truth_path: "/home/vahagn/projects/EthosightNew/benchmark/images/robbery/image-labels.csv" #labeled correctly per frame
   #normally ground_truth_path .csv contains all the labels so that is all you need
-  #however, if ground_truth_labels_path is specified, it will be used to generate
+  #however, if ground_truth_labels_path is specified, it will be used to generate 
   # the ground truth labels and embeddings files
   #if not set and the system needs to generate, the csv will be used
   #as long as your .csv has all the labels, you don't need to set this
@@ -53,23 +55,23 @@ benchmark:
   mapper:
     enabled: true
     # affinity_minthreshold is the minimum affinity score to consider a label
-    # if the affinity score is below the threshold, the label is set to
+    # if the affinity score is below the threshold, the label is set to 
    # normallabel
    affinity_minthreshold:
      enabled: false
      threshold: 26
      normallabel: "normal event"
    threshold: 26 #is this needed for video?
-    # modes:
+    # modes: 
    # - passthrough - pass input affinity scores through to output
    # - labelToLabelAffinity01 - uses affinity between expanded labels and ground truth labels
    # - compositeLabels01 - create composite labels from input affinity scores, no reasoning
-    # - reasoning01 - create composite labels from input affinity scores, with reasoning
+    # - reasoning01 - create composite labels from input affinity scores, with reasoning 
    # mode: 'passthrough'
    #mode: 'labelToLabelAffinity01'
    #mode: 'gpt4mode' #uses gpt4 to generate labels... input is the single top label
    # mode: 'gpt4mode2' #uses gpt4 to generate labels, input is the top n affinity scores
-    #submode: normallabelonly #only remap the normal label
+    #submode: normallabelonly #only remap the normal label 
    mode: 'hardmap01'
    hardmapfile: '../datasets/ucf-crime/ucf-crime-hardmap.txt'
    # prompt: "Return a label we will use to help analyze an image. The context is to identify crime behavior versus normal behavior. The camera angles may be overhead or normal more horizontal security camera angles. Please analyze the following input label and return 'normal event' or 'crime event' <<{label}>> please return only 'normal event' or 'crime event' as the label with no extra text or delimiters of any kind."
@@ -194,7 +196,7 @@ video:
   skip_frames: 0
   normal_label_name: "normal event"
   label_mapping: "periods_count"
-  # modes:
+  # modes: 
   # - majority - rank the labels by the number of frames
   # - longest_period - rank the labels by the number of consecutive frames
   # - periods_count - rank the labels by the number of periods
@@ -206,4 +208,4 @@ visualization:
 
 ################# phase2
 phase2:
-  maxiterations: 200
+  maxiterations: 200
\ No newline at end of file