Merge pull request #44 from ver228/development
Development merge for new version
ver228 committed Jun 11, 2018
2 parents cdaa6c0 + 94889bc commit 91fe03a
Showing 109 changed files with 2,503 additions and 65,906 deletions.
11 changes: 11 additions & 0 deletions .gitignore
@@ -18,3 +18,14 @@ Gemfile*
*.o

tierpsy/extras/unet_RMSprop-5-04999-0.3997.h5
*.so
*.so
*.pyd
*.so
*.pyd
tierpsy/analysis/ske_create/segWormPython/cython_files/segWorm_cython.c
tierpsy/analysis/ske_create/segWormPython/cython_files/linearSkeleton_cython.c
tierpsy/analysis/ske_create/segWormPython/cython_files/cleanWorm_cython.c
tierpsy/analysis/stage_aligment/get_mask_diff_var.c
tierpsy/analysis/ske_create/segWormPython/cython_files/curvspace.c
tierpsy/analysis/ske_create/segWormPython/cython_files/circCurvature.c
50 changes: 28 additions & 22 deletions .travis.yml
@@ -1,41 +1,47 @@
language: python
python:
# We don't actually use the Travis Python, but this keeps it organized.
- "3.5"
- "3.6"
# Note from : https://travis-ci.org/Anaconda-Platform/anaconda-project/
# language: python sets up virtualenv and pip that we don't need.
# omitting language gives us ruby stuff. c seems likely to be a minimal setup.
language: c

notifications:
email: false

matrix:
include:
- os: linux
sudo: required
- os: osx
env :
- TRAVIS_PYTHON_VERSION=3.6

os:
- linux
#- osx

install:
# install anaconda
- sudo apt-get update
- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
MINICONDA_OS=Linux ;
else
MINICONDA_OS=MacOSX ;
fi ;
echo "Fetching miniconda for $MINICONDA_OS" ;
curl https://repo.continuum.io/miniconda/Miniconda3-latest-$MINICONDA_OS-x86_64.sh -o miniconda.sh

- bash miniconda.sh -b -p $HOME/miniconda
- export PATH="$HOME/miniconda/bin:$PATH"
- hash -r
- conda config --set always_yes yes --set changeps1 no
- source "$HOME"/miniconda/bin/activate root
- printenv | sort
- conda config --set always_yes yes --set changeps1 no --set auto_update_conda false
- conda update -q conda
# Useful for debugging any issues with conda
- conda info -a

# create testing environment
- conda config --add channels conda-forge
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION
numpy matplotlib pytables pandas gitpython pyqt=5
h5py scipy scikit-learn scikit-image seaborn xlrd
cython statsmodels keras opencv
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy matplotlib pytables pandas gitpython pyqt=5 h5py scipy scikit-learn scikit-image seaborn xlrd cython statsmodels numba keras opencv tensorflow
- source activate test-environment
- pip install tensorflow

- printenv | sort

- export PATH=`echo "$PATH" | sed -e s@"$HOME"/miniconda/bin:@@g`
- printenv | sort

# install tierpsy
- bash installation/installation_script.sh --setup_modules
- bash installation/installation_script.sh

before_script:
#download examples
4 changes: 1 addition & 3 deletions README.md
@@ -9,6 +9,4 @@ Tierpsy Tracker is a multi-animal tracker developed in the [MRC-LMS](http://lms.
## [Algorithm Explanation](docs/EXPLANATION.md)
## [Output Files](docs/OUTPUTS.md)

<img src="https://cloud.githubusercontent.com/assets/8364368/26658216/8d5599b2-4660-11e7-911b-c390330a15ee.gif" width="480">


<img src="https://user-images.githubusercontent.com/8364368/41229372-a9e8fcfa-6d73-11e8-877c-de306be55526.gif" width="1024">
45 changes: 36 additions & 9 deletions create_binaries/TierpsyTracker.spec
@@ -1,5 +1,8 @@
# -*- mode: python -*-
#hidden imports needed for tierpsy, maybe there is a better way to call this...
DEBUG = False

#hidden imports needed for tierpsy. Each step is loaded dynamically so I need to give the hint to pyinstaller

import tierpsy.analysis
base_name = os.path.dirname(tierpsy.analysis.__file__)
analysis_steps = [x for x in os.listdir(base_name) if os.path.exists(os.path.join(base_name, x, '__init__.py'))]
@@ -8,10 +11,13 @@ print(hidden_tierspy)

import os
import sys
import glob
from PyInstaller.compat import is_win, is_darwin, is_linux


import tierpsy
import open_worm_analysis_toolbox
import tierpsy_features
from tierpsy.helper.misc import FFMPEG_CMD, FFPROBE_CMD
from tierpsy.gui import SelectApp

@@ -21,10 +27,8 @@ SRC_SCRIPT_PATH = SelectApp.__file__
DST_BUILD=os.path.abspath('.')
CREATE_CONSOLE= is_win #make a separated console only in windows. I have to do this due to a problem with pyinstaller

DEBUG = False

#get additional files
#openworm additional files
#get additional files for openworm
open_worm_path = os.path.dirname(open_worm_analysis_toolbox.__file__)

ow_feat = os.path.join('features', 'feature_metadata', 'features_list.csv')
@@ -35,6 +39,13 @@ ow_eigen = os.path.join('features', 'master_eigen_worms_N2.mat')
ow_eigen_src = os.path.join(open_worm_path, ow_eigen)
ow_eigen_dst = os.path.join('open_worm_analysis_toolbox', ow_eigen)

#get additional files for tierpsy_features
tierpsy_feat = os.path.join('features', 'feature_metadata', 'features_list.csv')
ow_feat_src = os.path.join(open_worm_path, ow_feat)
ow_feat_dst = os.path.join('open_worm_analysis_toolbox', ow_feat)

tierpsy_features_path = os.path.dirname(tierpsy_features.__file__)

#add ffmpeg and ffprobe
ffmpeg_src = FFMPEG_CMD
ffmpeg_dst = os.path.join('extras', os.path.basename(FFMPEG_CMD))
@@ -54,14 +65,22 @@ added_datas = [(ow_feat_dst, ow_feat_src, 'DATA'),
(ffmpeg_dst, ffmpeg_src, 'DATA'),
(ffprobe_dst, ffprobe_src, 'DATA')]

tierpsy_features_root = tierpsy_features_path.partition('tierpsy_features')[0]
for fname_src in glob.glob(os.path.join(tierpsy_features_path, 'extras', '**', '*'), recursive=True):
if os.path.basename(fname_src).startswith('.'):
continue
fname_dst = fname_src.replace(tierpsy_features_root, '')
added_datas.append((fname_dst, fname_src, 'DATA'))


#I add the file separator at the end, it makes my life easier later on
tierpsy_path = os.path.dirname(tierpsy.__file__)
tierpsy_path += os.sep

#add all the files in extras
for (dirpath, dirnames, filenames) in os.walk(os.path.join(tierpsy_path, 'extras')):
for fname in filenames:
if not fname.startswith('.'):
if not (fname.startswith('.') or fname.startswith('_')):
fname_src = os.path.join(dirpath, fname)
fname_dst = fname_src.replace(tierpsy_path, '')
added_datas.append((fname_dst, fname_src, 'DATA'))
@@ -93,7 +112,12 @@ a = Analysis([SRC_SCRIPT_PATH],
pathex=[DST_BUILD],
binaries=None,
datas = None,
hiddenimports=['h5py.defs', 'h5py.utils', 'h5py.h5ac', 'h5py._proxy',
hiddenimports=[
'ipywidgets',
'h5py.defs', 'h5py.utils', 'h5py.h5ac', 'h5py._proxy',
'scipy._lib.messagestream', 'cytoolz.utils',
'pandas._libs.tslibs.np_datetime', 'pandas._libs.tslibs.nattype',
'pandas._libs.skiplist',
'cython', 'sklearn', 'sklearn.neighbors.typedefs', 'pywt._extensions._cwt'] + hidden_tierspy,
hookspath=[],
runtime_hooks=[],
@@ -104,8 +128,10 @@ a = Analysis([SRC_SCRIPT_PATH],
#i was having problems with adding datas using Analysis, i decided to add them directly to a.datas

a.datas += added_datas
if is_darwin:
a.binaries.append(('libfreetype.6.dylib', '/usr/local/opt/freetype/lib/libfreetype.6.dylib', 'BINARY'))

f2c = '/usr/local/opt/freetype/lib/libfreetype.6.dylib'
if is_darwin and os.path.exists(f2c):
a.binaries.append(('libfreetype.6.dylib', f2c, 'BINARY'))
print([x for x in a.binaries if 'libfreetype' in x[0]])


@@ -155,5 +181,6 @@ else:
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx=False,
name='TierpsyTracker')

Binary file added create_binaries/_test/test.avi
Binary file not shown.
Binary file added create_binaries/_test/test.h5
Binary file not shown.
6 changes: 4 additions & 2 deletions create_binaries/_test/test.sh
@@ -6,5 +6,7 @@ pyinstaller --noconfirm --clean \
--hidden-import=h5py.utils \
--hidden-import=h5py.h5ac \
--hidden-import='h5py._proxy' \
--onefile \
test_pyinstaller.py
--onedir \
test_pyinstaller.py

cp test.avi ./dist/test_pyinstaller
5 changes: 5 additions & 0 deletions create_binaries/_test/test_pyinstaller.py
@@ -24,6 +24,11 @@

print(cv2.__version__)

vid = cv2.VideoCapture('test.avi')
ret, image = vid.read()
print('Img size', image.shape)
vid.release()

inputFiles = "test.h5"
with h5py.File(inputFiles, 'w') as inputFileOpen:
print('good h5py')
17 changes: 12 additions & 5 deletions create_binaries/_test/test_pyinstaller.spec
@@ -4,7 +4,7 @@ block_cipher = None


a = Analysis(['test_pyinstaller.py'],
pathex=['/Users/ajaver/Documents/GitHub/Multiworm_Tracking/create_binaries/test'],
pathex=['/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/create_binaries/_test'],
binaries=[],
datas=[],
hiddenimports=['h5py.defs', 'h5py.utils', 'h5py.h5ac', 'h5py._proxy'],
@@ -13,16 +13,23 @@ a = Analysis(['test_pyinstaller.py'],
excludes=['PyQt4', 'PyQt4.QtCore', 'PyQt4.QtGui'],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
exclude_binaries=True,
name='test_pyinstaller',
debug=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
name='test_pyinstaller')
9 changes: 9 additions & 0 deletions create_binaries/issues.txt
@@ -1,3 +1,12 @@
* There are still problems with pyqt5...
It seems pyinstaller cannot work with the anaconda pyqt5 version. Luckily there are pip installers for all the libraries. Execute the following before running.

pip install numpy matplotlib tables pandas pyqt5 h5py scipy scikit-learn scikit-image seaborn xlrd cython statsmodels numba keras tensorflow opencv-python-headless
pip install https://github.com/pyinstaller/pyinstaller/tarball/develop

* the pip versions of pytables and h5py are not compatible in python 3.5, but they are compatible in 3.6


* Version 3.2 does not correctly import pyqt5 in OSX (https://github.com/pyinstaller/pyinstaller/issues/1874). I had to download and install (python setup.py install) the development version (http://www.pyinstaller.org/).

* Got some issues related to different versions of libfreetype.6.dylib.
17 changes: 15 additions & 2 deletions docs/EXPLANATION.md
@@ -83,14 +83,27 @@ This step uses the profile intensity extracted in the [previous step](#int_profi

Using this algorithm the errors in head-tail identification decrease to 0.01% compared to 4.48% in the original [segWorm](https://github.com/openworm/SegWorm) implementation. Since this algorithm uses the median profile to identify switched regions, it can fail if [SKE_ORIENT](#ske_orient) did not previously orient most of the skeletons correctly.

## Extract Features
## Extract Features (OpenWorm route)

### FEAT_CREATE
This step uses the [open worm analysis toolbox](https://github.com/openworm/open-worm-analysis-toolbox) to calculate the skeleton features explained in [`basename_features.hdf5`](OUTPUTS.md/#basename_features.hdf5).
This step uses the [open worm analysis toolbox](https://github.com/openworm/open-worm-analysis-toolbox) to calculate the skeleton features explained in [`basename_features.hdf5`](OUTPUTS.md#basename_featureshdf5-openworm-analysis-toolbox).

### FEAT\_MANUAL\_CREATE
Same as [FEAT_CREATE](#feat_create) but it will only use the indexes that were manually identified as worms using the [Tierpsy Tracker Viewer](HOWTO.md#tierpsy-tracker-viewer). The results will be saved as `basename_feat_manual.hdf5`.

### WCON_EXPORT
Currently only used in `WT2`. Exports the skeletons data in [`basename_features.hdf5`](OUTPUTS.md/#basename_features.hdf5) using the [WCON format](https://github.com/openworm/tracker-commons). In the future this step should be available in the default analysis sequence.

## Extract Features (Tierpsy Features route)

### FOOD_CNT
CURRENTLY AVAILABLE ONLY FOR [AEX](https://www.imperial.ac.uk/people/andre.brown) DATA. Calculates the food contour using a pretrained neural network. The results are stored in [`/food_cnt_coord`](OUTPUTS.md/#food_cnt_coord). The process is considered to have failed if the contour solidity is larger than 0.98, in which case the results are not saved. You can visualize the results using the [Tierpsy Tracker Viewer](HOWTO.md#tierpsy-tracker-viewer).
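
The solidity check mentioned above can be illustrated with OpenCV. This is only a sketch of the idea (solidity = contour area / convex hull area), not the code used by FOOD_CNT, and the helper name `contour_solidity` is ours:

```python
import cv2
import numpy as np

def contour_solidity(contour: np.ndarray) -> float:
    """Solidity of a contour as returned by cv2.findContours (Nx1x2 int array)."""
    area = cv2.contourArea(contour)
    hull = cv2.convexHull(contour)
    hull_area = cv2.contourArea(hull)
    return float(area / hull_area) if hull_area > 0 else 0.0

# A contour with solidity above ~0.98 (an almost perfectly convex blob) would be
# treated as a failed food-contour detection and its results discarded.
```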

### FEAT_INIT
The smooth module in [tierpsy features](https://github.com/ver228/tierpsy_features) is used to smooth the skeletons over both time and space and to interpolate over small gaps of unskeletonized frames. It also creates the corresponding versions of the tables [`/blob_features`](OUTPUTS.md/#blob_features), [`/trajectories_data`](OUTPUTS.md/#trajectories_data) and [`/food_cnt_coord`](OUTPUTS.md/#food_cnt_coord) (if available) from [`basename_skeletons.hdf5`](OUTPUTS.md/#basename_skeletons.hdf5).
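
As an illustration of the gap-interpolation idea, the sketch below fills short runs of missing skeleton frames by linear interpolation. It is not the tierpsy_features smooth module itself; the array layout and the `max_gap` parameter are assumptions:

```python
import numpy as np
import pandas as pd

def interpolate_small_gaps(skeletons: np.ndarray, max_gap: int = 5) -> np.ndarray:
    """skeletons: (n_frames, n_segments, 2) array with NaNs on unskeletonized frames."""
    n_frames, n_segments, _ = skeletons.shape
    flat = skeletons.reshape(n_frames, -1)
    # Interpolate along the time axis, but only across gaps of at most max_gap frames
    # that are surrounded by valid data; longer gaps are left as NaN.
    filled = (pd.DataFrame(flat)
                .interpolate(method='linear', limit=max_gap, limit_area='inside')
                .to_numpy())
    return filled.reshape(n_frames, n_segments, 2)
```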

### FEAT_TIERPSY
This step uses [tierpsy features](https://github.com/ver228/tierpsy_features) to calculate the features explained in [`basename_featuresN.hdf5`](OUTPUTS.md#basename_featuresnhdf5-tierpsy-features).


15 changes: 10 additions & 5 deletions docs/HOWTO.md
@@ -4,9 +4,9 @@ Example files can be found [here](https://imperiallondon-my.sharepoint.com/:u:/g

You can analyze the videos using the [Batch Processing Multiple Files](#batch-processing-multiple-files) App. The videos require different analysis parameters since they come from different setups, so they cannot be processed together.

For the multiworm video the `Parameters File` must be set to `MULTI_RIG.json` and the `File Pattern to Include` as `*.mov` as shown below:
For the multiworm video the `Parameters File` must be set to `MULTIWORM_OPENWORM.json` and the `File Pattern to Include` as `*.mov` as shown below:

<img width="450" alt="screen shot 2018-04-25 at 09 04 05" src="https://user-images.githubusercontent.com/8364368/39233808-6344ddca-4869-11e8-9c34-d3db9102109f.png">
<img width="450" alt="screen shot 2018-06-11 at 12 47 16" src="https://user-images.githubusercontent.com/8364368/41229893-9c0ab892-6d75-11e8-97d8-553bae8b4ea8.png">

For the single worm video the `Parameters File` must be set to `WT2_clockwise.json` and the `File Pattern to Include` as `*.avi` as shown below:

@@ -171,8 +171,12 @@ You can manually correct the trajectories as shown below. Once you have finished

`Right key` : Decrease the frame by step size.

### Plotting the Analysis Results
The analysis will produce a set of files described [here](https://github.com/ver228/tierpsy-tracker/blob/development/docs/OUTPUTS.md). The extracted features are store in the files that end with [features.hdf5](https://github.com/ver228/tierpsy-tracker/blob/development/docs/OUTPUTS.md#basename_featureshdf5). You can access to them using [pandas](http://pandas.pydata.org/) and [pytables](http://www.pytables.org/). There are examples on how to do it in MATLAB in the [tierpsy_tools](https://github.com/aexbrown/tierpsy_tools) repository.
### Visualizing Analysis Results
The extracted features are stored in the files that end with [featuresN.hdf5](https://github.com/ver228/tierpsy-tracker/blob/development/docs/OUTPUTS.md#basename_featuresNhdf5) if the tierpsy features route was selected, or in [features.hdf5](https://github.com/ver228/tierpsy-tracker/blob/development/docs/OUTPUTS.md#basename_featureshdf5) if the openworm route was selected. You can visualize the features in different ways as shown below:

![features](https://user-images.githubusercontent.com/8364368/41231110-e89f2e14-6d79-11e8-96d7-523f13844555.gif)

From the plotting window you can either save the plots or export the data of individual features/trajectories into csv files. If you would like to compare data from multiple experiments we strongly recommend the [Features Summary](#features-summary) app. If you would like to work directly with the time series data, we recommend reading the data with a scripting language like Python, using the packages [pandas](http://pandas.pydata.org/) and [pytables](http://www.pytables.org/), or with MATLAB following the examples in the [tierpsy_tools](https://github.com/aexbrown/tierpsy_tools) repository.
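
For example, a `featuresN.hdf5` file can be opened from Python roughly as follows. This is a minimal sketch; it assumes the `/timeseries_data` and `/trajectories_data` tables described in the output files document were saved in a pandas-readable format:

```python
import pandas as pd

fname = 'basename_featuresN.hdf5'  # output of the tierpsy features route
with pd.HDFStore(fname, 'r') as fid:
    timeseries_data = fid['/timeseries_data']      # per-frame features, one row per worm per frame
    trajectories_data = fid['/trajectories_data']  # tracking table linking worm indexes to frames

print(timeseries_data.head())
```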

## Features Summary
![FeatSummary](https://user-images.githubusercontent.com/8364368/41034550-d3665230-6981-11e8-97d9-63c74ff24661.png)
@@ -190,4 +194,5 @@ The files will be located by doing a recursive search for matching the extension
| openworm | Ticked | feat_manual.hdf5 |
| openworm | Unticked | features.hdf5 |

The results are saved into two separated .csv file located in the root directory. The first file, `filenames_FEATURETYPE_SUMMARY_DATE.csv`, contains the names of all the files found in root the directory. The `is_good` column is set to `True` if the file is valid and used in the summary. The second file, `features_FEATURETYPE_SUMMARY_DATE.csv`, contains the corresponding features summarized as described in the [output files](https://github.com/ver228/tierpsy-tracker/blob/master/docs/OUTPUTS.md#features_summar) section. The two result files can be joined using the `file_id` column.
The results are saved into two separate .csv files located in the root directory. The first file, `filenames_FEATURETYPE_SUMMARY_DATE.csv`, contains the names of all the files found in the root directory. The `is_good` column is set to `True` if the file is valid and used in the summary. The second file, `features_FEATURETYPE_SUMMARY_DATE.csv`, contains the corresponding features summarized as described in the [output files](OUTPUTS.md#features_summar) section. The two result files can be joined using the `file_id` column.
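
A minimal sketch of joining the two summary files on `file_id` with pandas. The file names below are placeholders following the FEATURETYPE/DATE pattern above, and only the `file_id` and `is_good` columns are taken from the description:

```python
import pandas as pd

# Placeholder file names; substitute the actual FEATURETYPE and DATE of your run.
filenames = pd.read_csv('filenames_tierpsy_SUMMARY_DATE.csv')
features = pd.read_csv('features_tierpsy_SUMMARY_DATE.csv')

# Keep only the files flagged as valid and attach their metadata to each summary row.
summary = features.merge(filenames[filenames['is_good']], on='file_id', how='inner')
print(summary.head())
```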

18 changes: 10 additions & 8 deletions docs/INSTALLATION.md
@@ -1,22 +1,24 @@
*The instructions below are to install Tierpsy Tracker from the source code. I would recommend doing this only if you are using Linux or want to run the development version; otherwise use the double-click executables available for Windows (7 or later) and OSX (Yosemite or later) in the [releases page](https://github.com/ver228/tierpsy-tracker/releases).*

# System Requirements
- Freshly installed [miniconda](https://conda.io/miniconda.html) or at least setup up a new enviroment.
- Python 3.6 (I would recommend using [miniconda](https://conda.io/miniconda.html)).
- Optional [ffmpeg](https://ffmpeg.org/download.html): ffprobe must be accessible from the command line to calculate the video timestamps.
- [C compiler compatible with cython](http://cython.readthedocs.io/en/latest/src/quickstart/install.html). In Windows, you can use [Visual C++ 2015 Build Tools](http://landinghub.visualstudio.com/visual-cpp-build-tools). In OSX, if you install [homebrew](https://brew.sh/) it will setup the C compiler without the need to download XCode from the appstore.
- [C compiler compatible with cython](http://cython.readthedocs.io/en/latest/src/quickstart/install.html). In Windows, you can use [Visual C++ 2015 Build Tools](https://www.microsoft.com/en-us/download/details.aspx?id=48159). In OSX, if you install [homebrew](https://brew.sh/) it will set up the C compiler without the need to download XCode from the App Store.
- [Git](https://git-scm.com/). [Here](https://gist.github.com/derhuerst/1b15ff4652a867391f03) are some instructions to install it.

# Installation

1. Install the conda dependencies from the conda-forge channel:
1. Install dependencies:

Using pip:
```bash
conda config --add channels conda-forge

conda install -y numpy matplotlib pytables pandas gitpython pyqt=5 \
h5py scipy scikit-learn scikit-image seaborn xlrd cython statsmodels numba
pip install numpy matplotlib tables pandas pyqt5 h5py scipy scikit-learn scikit-image seaborn xlrd cython statsmodels numba keras==2.1.5 tensorflow opencv-python-headless
```

conda install -y -c conda-forge keras opencv tensorflow
Using anaconda:
```bash
conda install --channel conda-forge numpy matplotlib pytables pandas gitpython pyqt h5py \
scipy scikit-learn scikit-image seaborn xlrd cython statsmodels numba keras=2.1.5 opencv tensorflow
```

2. Clone this repository either using the [Github Desktop](https://desktop.github.com/) or from the command line as:
