Capstone - Push Up Detector (MoveNet, combining experiment (failed)) 🐲
{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Capstone - Push Up Detector (MoveNet, combining experiment(failed)) 🐲","provenance":[{"file_id":"1BQNF_CD-36vpeI--L1pI0-uJPMMyxfmd","timestamp":1652123449224},{"file_id":"1J8E2UZoWUZcd5mp6brqSD_-gpqaWonMa","timestamp":1651865637744}],"collapsed_sections":[],"mount_file_id":"1J8E2UZoWUZcd5mp6brqSD_-gpqaWonMa","authorship_tag":"ABX9TyNwVw2cVVFeysZjrUYAA6hj"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"markdown","source":["# 💪🏽**Push Up Detector**💦✨\n","This model is designed to detect push up activity that consists of the up and down movement. The dataset will be in the form of stationary pose landmarks. One push up will be counted if the model detected several constant and continuous push up 'Up' pose followed by several constant and continuous push up 'Down' pose.\n","\n","This notebook is presented to you by Bangkit Team C22-PS072's ML Squad💖:\n","\n","\n","* Deandra Setyaputri - M2010F1120\n","* Wilma Elysia - M7013F1348\n","\n"],"metadata":{"id":"3YSzRfpc5NnJ"}},{"cell_type":"markdown","source":["## 0. Manage Dependencies"],"metadata":{"id":"tiI0kJBbwui0"}},{"cell_type":"code","source":["!pip install tensorflow tensorflow-gpu opencv-python matplotlib sklearn tensorflow_hub"],"metadata":{"id":"KZVy7KhvXSAN"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import matplotlib.pyplot as plt\n","import numpy as np\n","import glob\n","import cv2\n","import os\n","import enum\n","import tensorflow as tf\n","from tensorflow import keras\n","import tensorflow_hub as hub"],"metadata":{"id":"hQly5z6WydbC"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["## 1. Preprocess Input Images\n","First, create a copy of every image data where they are flipped horizontally so that the model can accurately detect push-up poses regardless if the user is facing left or right."],"metadata":{"id":"dspDqEBd_Vq6"}},{"cell_type":"code","source":["from google.colab import drive\n","\n","drive.mount(\"/content/drive\") \n","root = \"/content/drive/My Drive/Datasets/pushup/\"\n","\n","pushup_up_folder = root + \"pushup_up\" + \"/*.*\"\n","pushup_down_folder = root + \"pushup_down\" + \"/*.*\""],"metadata":{"id":"qNz5rgwh_eZT","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1652124198278,"user_tz":-420,"elapsed":20038,"user":{"displayName":"Deandra Setyaputri M2010F1120","userId":"08611358996117971618"}},"outputId":"668ef0ee-e399-4661-d806-c62083bc70f9"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["def flip_img_folder(folder):\n"," for file in glob.glob(folder):\n"," image = cv2.imread(file)\n"," flipped_img_path = file[:-4] + \"-reversed.\" + file[-3:]\n"," # file[:-4] is the name of the path and image name without the .jpg or .png extension\n","\n"," if(file[-13:-4] != \"-reversed\" and os.path.exists(flipped_img_path) == False):\n"," flipped_img = cv2.flip(image, 1)\n"," cv2.imwrite(flipped_img_path, flipped_img)"],"metadata":{"id":"WmgnqaZxEKga"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["flip_img_folder(pushup_up_folder)\n","flip_img_folder(pushup_down_folder)"],"metadata":{"id":"t5R58Me4PnZm"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["We will convert all the image data to arrays of landmarks representing the human pose in each image. 
## 0. Manage Dependencies

```python
!pip install tensorflow tensorflow-gpu opencv-python matplotlib scikit-learn tensorflow_hub
```

(Note: `scikit-learn` is the installable package name; `sklearn` is only the import name.)

```python
import matplotlib.pyplot as plt
import numpy as np
import glob
import cv2
import os
import enum
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
```

## 1. Preprocess Input Images

First, create a horizontally flipped copy of every image so that the model can accurately detect push-up poses regardless of whether the user is facing left or right.

```python
from google.colab import drive

drive.mount("/content/drive")
root = "/content/drive/My Drive/Datasets/pushup/"

pushup_up_folder = root + "pushup_up" + "/*.*"
pushup_down_folder = root + "pushup_down" + "/*.*"
```

```
Mounted at /content/drive
```

```python
def flip_img_folder(folder):
    for file in glob.glob(folder):
        image = cv2.imread(file)
        # file[:-4] is the path and file name without the .jpg or .png extension
        flipped_img_path = file[:-4] + "-reversed." + file[-3:]

        # Only flip originals (not "-reversed" copies), and only once
        if file[-13:-4] != "-reversed" and not os.path.exists(flipped_img_path):
            flipped_img = cv2.flip(image, 1)
            cv2.imwrite(flipped_img_path, flipped_img)
```

```python
flip_img_folder(pushup_up_folder)
flip_img_folder(pushup_down_folder)
```

We will convert all the image data to arrays of landmarks representing the human pose in each image, then create a CSV file containing those data to be used in training later.
```python
modelEstimator = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
```

This cell failed while importing the SavedModel graph (`InvalidArgumentError: Operation 'block_14_depthwise_BN/ReadVariableOp' has no attr named '_class'.`) and was eventually aborted with a `KeyboardInterrupt`. The cells below try alternative ways to load a local copy of the model instead.

```python
model1 = keras.models.load_model('/content/movenet_singlepose_lightning_4')
model1.summary()
```

```python
modelEstimator = tf.saved_model.load('/content/movenet_singlepose_lightning_4')
```
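The notebook never shows where `/content/movenet_singlepose_lightning_4` (used in the two cells above) comes from. A hedged sketch of one way to produce that local copy, assuming the TF Hub archive was downloaded and extracted manually (`?tf-hub-format=compressed` is TF Hub's documented download form):

```python
# Assumption: the local SavedModel was obtained by downloading the compressed
# TF Hub archive and extracting it into /content.
!wget -q "https://tfhub.dev/google/movenet/singlepose/lightning/4?tf-hub-format=compressed" -O movenet.tar.gz
!mkdir -p /content/movenet_singlepose_lightning_4
!tar -xzf movenet.tar.gz -C /content/movenet_singlepose_lightning_4
```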
```python
hub_layer = hub.KerasLayer("https://tfhub.dev/google/movenet/singlepose/lightning/4", trainable=True)
m = tf.keras.Sequential([
    hub_layer
])
# m.summary()
# modelEstimator = tf.saved_model.load('/content/movenet_singlepose_lightning_4')
# movenet = modelEstimator.signatures['serving_default']
# movenet.summary()
```

This cell also failed: `ValueError: Signature name has to be specified for non-callable saved models (if not legacy TF1 Hub format).`
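The error means the MoveNet SavedModel is not directly callable; it only exposes a named serving signature. A sketch of the two usual workarounds (not verified in this notebook, which was abandoned at this point):

```python
# Option 1: call the named signature directly, as the commented-out lines above
# suggest. MoveNet's signature returns a dict whose 'output_0' entry has shape
# [1, 1, 17, 3].
movenet = modelEstimator.signatures['serving_default']

# Option 2: tell KerasLayer which signature to wrap. MoveNet is inference-only,
# so trainable must stay False (the default).
hub_layer = hub.KerasLayer(
    "https://tfhub.dev/google/movenet/singlepose/lightning/4",
    signature='serving_default',
    signature_outputs_as_dict=True,
)
```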
```python
# Define the classifier that maps 51 landmark values (17 keypoints x 3) to the
# two pose classes. Note: landmarks_to_embedding is defined a few cells below,
# so that cell must be run before this one.
inputs = tf.keras.Input(shape=(51,))
embedding = landmarks_to_embedding(inputs)

layer = keras.layers.Dense(128, activation=tf.nn.relu6)(embedding)
layer = keras.layers.Dropout(0.5)(layer)
layer = keras.layers.Dense(64, activation=tf.nn.relu6)(layer)
layer = keras.layers.Dropout(0.5)(layer)
outputs = keras.layers.Dense(2, activation="sigmoid")(layer)

model = keras.Model(inputs, outputs, name="pushupdetector")
model.summary()
```
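One detail worth flagging: `sparse_categorical_crossentropy` (used in section 2) expects the outputs to form a probability distribution, but two independent `sigmoid` units do not sum to 1. A more conventional head would be (a suggestion, not what the authors ran):

```python
# Softmax normalizes the two class scores into a proper distribution.
outputs = keras.layers.Dense(2, activation="softmax")(layer)
```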
```python
def processPose(image):
    img = tf.image.resize_with_pad(np.expand_dims(image, axis=0), 192, 192)
    img = tf.cast(img, dtype=tf.int32)
    # Run MoveNet inference through its serving signature.
    movenet = modelEstimator.signatures['serving_default']
    outputs = movenet(img)
    # Get the model prediction: 17 keypoints with confidence scores.
    keypoints_with_scores = outputs['output_0'].numpy()
    keypoints = keypoints_with_scores.flatten().reshape(17, 3)
    fixed_keypoints = []
    for [x, y, score] in keypoints:
        if score < 0.2:
            # Zero out low-confidence keypoints
            fixed_keypoints.append([0, 0, 0])
        else:
            fixed_keypoints.append([x, y, score])
    return np.array(fixed_keypoints)
```
```python
# trying out the library on a sample image and on a black image
for file in glob.glob(pushup_up_folder):
    img = cv2.imread(file)
    keypoints = processPose(img).flatten().reshape(17, 3)
    break

img = cv2.imread('/content/800px-A_black_image.jpg')
keypoints = processPose(img).flatten().reshape(17, 3)
fixed_keypoints = []
for [x, y, score] in keypoints:
    if score < 0.2:
        fixed_keypoints.append([0, 0, 0])
    else:
        fixed_keypoints.append([x, y, score])
print(fixed_keypoints)
# landmarks = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_world_landmarks.landmark]).flatten()
# print(landmarks)
```

Output (a black image yields no confident keypoints, so every entry is zeroed):

```
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
```
```python
def get_center_point(landmarks, left_bodypart, right_bodypart):
    """Calculates the center point of the two given landmarks."""

    left = tf.gather(landmarks, left_bodypart.value, axis=1)
    right = tf.gather(landmarks, right_bodypart.value, axis=1)
    center = left * 0.5 + right * 0.5
    return center


def get_pose_size(landmarks, torso_size_multiplier=2.5):
    """Calculates pose size.

    It is the maximum of two values:
    * Torso size multiplied by `torso_size_multiplier`
    * Maximum distance from pose center to any pose landmark
    """
    # Hips center
    hips_center = get_center_point(landmarks, BodyPart.LEFT_HIP,
                                   BodyPart.RIGHT_HIP)

    # Shoulders center
    shoulders_center = get_center_point(landmarks, BodyPart.LEFT_SHOULDER,
                                        BodyPart.RIGHT_SHOULDER)

    # Torso size as the minimum body size
    torso_size = tf.linalg.norm(shoulders_center - hips_center)

    # Pose center
    pose_center_new = get_center_point(landmarks, BodyPart.LEFT_HIP,
                                       BodyPart.RIGHT_HIP)
    pose_center_new = tf.expand_dims(pose_center_new, axis=1)
    # Broadcast the pose center to the same size as the landmark vector to
    # perform subtraction
    pose_center_new = tf.broadcast_to(pose_center_new,
                                      [tf.size(landmarks) // (17*2), 17, 2])

    # Distance to pose center
    d = tf.gather(landmarks - pose_center_new, 0, axis=0,
                  name="dist_to_pose_center")
    # Max distance to pose center
    max_dist = tf.reduce_max(tf.linalg.norm(d, axis=0))

    # Normalize scale
    pose_size = tf.maximum(torso_size * torso_size_multiplier, max_dist)

    return pose_size


def normalize_pose_landmarks(landmarks):
    """Normalizes the landmarks translation by moving the pose center to (0,0)
    and scaling it to a constant pose size.
    """
    # Move landmarks so that the pose center becomes (0,0)
    pose_center = get_center_point(landmarks, BodyPart.LEFT_HIP,
                                   BodyPart.RIGHT_HIP)
    pose_center = tf.expand_dims(pose_center, axis=1)
    # Broadcast the pose center to the same size as the landmark vector to
    # perform subtraction
    pose_center = tf.broadcast_to(pose_center,
                                  [tf.size(landmarks) // (17*2), 17, 2])
    landmarks = landmarks - pose_center

    # Scale the landmarks to a constant pose size
    pose_size = get_pose_size(landmarks)
    landmarks /= pose_size

    return landmarks


def landmarks_to_embedding(landmarks_and_scores):
    """Converts the input landmarks into a pose embedding."""
    # Reshape the flat input into a matrix with shape=(17, 3)
    reshaped_inputs = keras.layers.Reshape((17, 3))(landmarks_and_scores)

    # Normalize landmarks 2D
    landmarks = normalize_pose_landmarks(reshaped_inputs[:, :, :2])

    # Flatten the normalized landmark coordinates into a vector
    embedding = keras.layers.Flatten()(landmarks)

    return embedding
```
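These helpers reference a `BodyPart` enum that is never defined anywhere in this notebook (only `import enum` appears in the dependencies cell). A minimal sketch following MoveNet's standard 17-keypoint order, which would need to run before the classifier-definition cell above:

```python
# Assumed definition: MoveNet's 17 keypoints in their documented output order.
class BodyPart(enum.Enum):
    NOSE = 0
    LEFT_EYE = 1
    RIGHT_EYE = 2
    LEFT_EAR = 3
    RIGHT_EAR = 4
    LEFT_SHOULDER = 5
    RIGHT_SHOULDER = 6
    LEFT_ELBOW = 7
    RIGHT_ELBOW = 8
    LEFT_WRIST = 9
    RIGHT_WRIST = 10
    LEFT_HIP = 11
    RIGHT_HIP = 12
    LEFT_KNEE = 13
    RIGHT_KNEE = 14
    LEFT_ANKLE = 15
    RIGHT_ANKLE = 16
```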
embedding.\"\"\"\n"," # Reshape the flat input into a matrix with shape=(17, 3)\n"," reshaped_inputs = keras.layers.Reshape((17, 3))(landmarks_and_scores)\n","\n"," # Normalize landmarks 2D\n"," landmarks = normalize_pose_landmarks(reshaped_inputs[:, :, :2])\n","\n"," # Flatten the normalized landmark coordinates into a vector\n"," embedding = keras.layers.Flatten()(landmarks)\n","\n"," return embedding"]},{"cell_type":"code","source":["def detect_pose_from_folder(folder, folder_label):\n"," landmarks = []\n"," labels = []\n"," for file in glob.glob(folder):\n"," image = cv2.imread(file)\n"," keypoints = processPose(image).flatten()\n"," if len(keypoints)!=0:\n"," landmarks.append(keypoints)\n"," labels.append(folder_label)\n"," return landmarks, labels"],"metadata":{"id":"9EXF5IfUhpG8"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# '1' is the label for the up pushup movement\n","pushup_up_landmarks, pushup_up_labels = detect_pose_from_folder(pushup_up_folder, 1)\n","pushup_down_landmarks, pushup_down_labels = detect_pose_from_folder(pushup_down_folder, 0)"],"metadata":{"id":"FpKtOMcMZle5"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["landmarks = pushup_up_landmarks + pushup_down_landmarks\n","labels = pushup_up_labels + pushup_down_labels"],"metadata":{"id":"B4lovPOAevDy"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["print(len(landmarks))\n","print(len(labels))\n","print(landmarks)"],"metadata":{"id":"1jh-UB9tg9uz"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import random\n","\n","X = np.array(landmarks)\n","y = np.array(labels)\n","dataset = [(X[i], y[i]) for i in range(0, len(labels))]\n","random.shuffle(dataset)\n","X.shape"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"RCrpQ0hbqs6Q","executionInfo":{"status":"ok","timestamp":1652018236768,"user_tz":-420,"elapsed":357,"user":{"displayName":"Deandra Setyaputri M2010F1120","userId":"08611358996117971618"}},"outputId":"b2155cb0-5ccd-4f29-8000-42548c67a2f0"},"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(378, 51)"]},"metadata":{},"execution_count":46}]},{"cell_type":"code","source":["import csv\n","\n","# open the file in the write mode\n","with open('/content/drive/My Drive/Datasets/pushup/pushup_dataset_movenet', 'w') as f:\n"," # create the csv writer\n"," writer = csv.writer(f)\n","\n"," # write the header to the csv file\n"," writer.writerow(['pushup_pose', 'pose_world_landmarks'])\n","\n"," for landmark, label in dataset:\n"," writer.writerow([label, landmark])"],"metadata":{"id":"tgqCaUGSa5nv"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["## 2. 
Train Model"],"metadata":{"id":"SCvb0Duits5j"}},{"cell_type":"code","source":["from sklearn.model_selection import train_test_split\n","\n","X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n","X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5)"],"metadata":{"id":"kdtSr2kPwoEr"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# model = keras.Sequential([\n","# keras.layers.Input(shape=(64)),\n","# keras.layers.Dense(128, activation='relu'),\n","# keras.layers.Dropout(0.5),\n","# keras.layers.Dense(64, activation='relu'),\n","# keras.layers.Dropout(0.5),\n","# keras.layers.Dense(2, activation=\"sigmoid\")\n","# ])\n","\n","model.summary()"],"metadata":{"id":"vV1wBCrfuH6K"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["model.compile(\n"," optimizer='adam',\n"," loss='sparse_categorical_crossentropy',\n"," metrics=['accuracy']\n",")\n","\n","history = model.fit(X_train, y_train,\n"," epochs=100,\n"," batch_size=16,\n"," validation_data=(X_val, y_val))"],"metadata":{"id":"K8I9vzbs2uC9"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["print(\"Evaluate on test data\")\n","results = model.evaluate(X_test, y_test, batch_size=32)\n","print(\"test loss, test acc:\", results)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"YOyDDFql4YkD","executionInfo":{"status":"ok","timestamp":1652018441318,"user_tz":-420,"elapsed":353,"user":{"displayName":"Deandra Setyaputri M2010F1120","userId":"08611358996117971618"}},"outputId":"693b3f53-e6ad-47cc-b10b-f6872cfeafb6"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Evaluate on test data\n","2/2 [==============================] - 0s 5ms/step - loss: 0.2047 - accuracy: 0.9298\n","test loss, test acc: [0.2047211229801178, 0.9298245906829834]\n"]}]},{"cell_type":"code","source":["# test the model prediction\n","\n","for file in glob.glob(pushup_up_folder)[:3]:\n"," image = cv2.imread(file)\n"," keypoints = processPose(image).flatten().reshape(1, 51)\n"," predd = model.predict(keypoints)\n"," posee = np.argmax(predd)\n"," confidence = predd[0][posee]\n"," print(predd)\n"," print(posee)\n"," print(confidence)\n"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"4IVTqFkMECuG","executionInfo":{"status":"ok","timestamp":1652021143177,"user_tz":-420,"elapsed":876,"user":{"displayName":"Deandra Setyaputri M2010F1120","userId":"08611358996117971618"}},"outputId":"7b347a90-7e61-4b35-856b-549aacd0bffa"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["[[2.656579e-04 9.997175e-01]]\n","1\n","0.9997175\n","[[1.6873252e-06 9.9999791e-01]]\n","1\n","0.9999979\n","[[9.3793496e-05 9.9990046e-01]]\n","1\n","0.99990046\n"]}]},{"cell_type":"markdown","source":["## 3. 
Save The Model"],"metadata":{"id":"rSBstkcD_nva"}},{"cell_type":"code","source":["!pip install pyyaml h5py"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"mhOe3RvD_sz6","executionInfo":{"status":"ok","timestamp":1652018569257,"user_tz":-420,"elapsed":4775,"user":{"displayName":"Deandra Setyaputri M2010F1120","userId":"08611358996117971618"}},"outputId":"2763135e-2652-4863-adf1-4802e20da856"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (3.13)\n","Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (3.1.0)\n","Requirement already satisfied: numpy>=1.14.5 in /usr/local/lib/python3.7/dist-packages (from h5py) (1.21.6)\n","Requirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py) (1.5.2)\n"]}]},{"cell_type":"code","source":["model.save('pushup-counter-movenet.h5')"],"metadata":{"id":"DdM8p7nWBmDQ"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["## 4. Convert to TFlite"],"metadata":{"id":"dUEal5PAiwsW"}},{"cell_type":"code","source":["converter = tf.lite.TFLiteConverter.from_keras_model(model)\n","tflite_model = converter.convert()\n","\n","with open('model-movenet.tflite', 'wb') as f:\n"," f.write(tflite_model)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"N8bGSSWYi5PH","executionInfo":{"status":"ok","timestamp":1652018602286,"user_tz":-420,"elapsed":8675,"user":{"displayName":"Deandra Setyaputri M2010F1120","userId":"08611358996117971618"}},"outputId":"a71b7ce8-59c0-4aaa-c62e-0d58de3c997e"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["INFO:tensorflow:Assets written to: /tmp/tmprwxutj_u/assets\n"]},{"output_type":"stream","name":"stderr","text":["WARNING:absl:Buffer deduplication procedure will be skipped when flatbuffer library is not properly loaded\n"]}]},{"cell_type":"markdown","source":["## 5. 
## 5. Create Push Up Counter with Webcam

This final cell is kept as a fenced snippet because webcams are not available in Colab. Note that it still uses the MediaPipe Pose pipeline from an earlier version of this notebook (`mp_pose`, `mp_drawing`, and a 132-value landmark vector, i.e. 33 landmarks × 4 values), while the classifier trained above expects MoveNet's 51-value input (17 keypoints × 3). That mismatch is the "combining experiment" that failed.

```
!pip install gTTS ipython

import mediapipe as mp  # required for mp_pose / mp_drawing below
from gtts import gTTS
from IPython.display import Audio

mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils

def text_to_speech(sentence):
    tts = gTTS(sentence)  # provide the string to convert to speech
    sound_file = '1.wav'
    tts.save(sound_file)  # save the speech audio as a .wav file
    return sound_file

audio = text_to_speech('test')
Audio(audio, autoplay=True)

def allEqual(iterable):
    iterator = iter(iterable)

    try:
        firstItem = next(iterator)
    except StopIteration:
        return True

    for x in iterator:
        if x != firstItem:
            return False
    return True

threshold = 0.75
history = []
fail_count = 0
pushup_count = 0
num_frames_requirement = 15
pushup_down_done = False

cap = cv2.VideoCapture(0)

with mp_pose.Pose(static_image_mode=False, model_complexity=1, enable_segmentation=False, min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        # Read feed
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)

        results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

        cv2.putText(frame, "push up count:" + str(pushup_count), (3, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

        if results.pose_world_landmarks:
            mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)  # draw pose connections
            live_landmarks = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_world_landmarks.landmark]).flatten()

            y_pred = model.predict(live_landmarks.reshape(1, 132))
            pushup_pose = np.argmax(y_pred)
            pose_confidence = y_pred[0][pushup_pose]

            if pose_confidence > threshold:
                # reset continuous fail count
                fail_count = 0

                history.append(pushup_pose)

                if len(history) >= (2 * num_frames_requirement):
                    if allEqual(history[(-1 * num_frames_requirement):]):
                        if history[(-1 * num_frames_requirement)] != history[(-1 * num_frames_requirement) - 1]:
                            if allEqual(history[(-2 * num_frames_requirement):(-1 * num_frames_requirement)]):
                                if pushup_pose == 0:
                                    pushup_down_done = True
                                else:
                                    if pushup_down_done:
                                        pushup_count += 1
                                        pushup_down_done = False
                                        print(pushup_count)
                                        cv2.putText(frame, "push up count:" + str(pushup_count), (3, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
            else:
                fail_count += 1
                if fail_count > 15:
                    history = []
                    pushup_down_done = False

        cv2.imshow('OpenCV Feed', frame)

        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
```
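For completeness, a sketch of how the prediction step would have to change to use this notebook's MoveNet pipeline instead (hypothetical; the combination was never made to work, which is why the notebook is marked as failed):

```python
# Inside the webcam loop, replace the MediaPipe landmark extraction with
# processPose, which yields the 51-value (17 x 3) input this classifier expects.
ret, frame = cap.read()
keypoints = processPose(frame).flatten().reshape(1, 51)
y_pred = model.predict(keypoints)
pushup_pose = np.argmax(y_pred)
pose_confidence = y_pred[0][pushup_pose]
```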