diff --git a/.zenodo.json b/.zenodo.json
new file mode 100644
index 000000000..9fac9450b
--- /dev/null
+++ b/.zenodo.json
@@ -0,0 +1,30 @@
+{
+    "description": "KM3Pipe - pipeline framework for KM3NeT.",
+    "license": "MIT",
+    "title": "tamasgal/km3pipe: KM3Pipe 6.8.0",
+    "upload_type": "software",
+    "publication_date": "2017-06-14",
+    "creators": [
+        {
+            "affiliation": "Erlangen Centre for Astroparticle Physics (ECAP)",
+            "name": "Tamas Gal"
+        },
+        {
+            "affiliation": "Instituto de Física Corpuscular (IFIC)",
+            "name": "Moritz Lotze"
+        }
+    ],
+    "access_right": "open",
+    "related_identifiers": [
+        {
+            "scheme": "url",
+            "identifier": "https://github.com/tamasgal/km3pipe/tree/v6.8.0",
+            "relation": "isSupplementTo"
+        },
+        {
+            "scheme": "doi",
+            "identifier": "10.5281/zenodo.808829",
+            "relation": "isPartOf"
+        }
+    ]
+}
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index a0afca67d..7802e2ebb 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,11 @@
 Unreleased changes
 ------------------
 
+6.8.1 / 2017-06-15
+------------------
+- DOI citation added
+- tohdf5: aa pump: make zed correction (mc tracks) optional
+
 6.8.0 / 2017-06-13
 ------------------
 * minor bugfixes
diff --git a/README.rst b/README.rst
index bd989a897..100530bfa 100644
--- a/README.rst
+++ b/README.rst
@@ -3,12 +3,14 @@ KM3Pipe
 
 .. image:: https://travis-ci.org/tamasgal/km3pipe.svg?branch=develop
     :target: https://travis-ci.org/tamasgal/km3pipe
+.. image:: https://zenodo.org/badge/24634697.svg
+    :target: https://zenodo.org/badge/latestdoi/24634697
 
 KM3Pipe is a framework for KM3NeT related stuff including MC, data files, live access to detectors and databases, parsers for different file formats and an easy to use framework for batch processing.
 
 The framework tries to standardise the way the data is processed within our collaboration by providing a Pipeline-class, which can be used to put together different built-in or user made Pumps and Modules. Pumps act as data readers/parsers (from files, memory or even socket connections) and Modules take care of data processing and output. Such a Pipeline setup can then be used to iteratively process data in a file. In our case for example we store several thousands of neutrino interaction events in a bunch of files and KM3Pipe is used to put together an analysis chain which processes each event one-by-one.
 
-Although it is mainly designed for the KM3NeT neutrino detectors, it can easily be extended to support any kind of data formats. Feel free to get in touch if you’re looking for a small, versatile framework which provides a quite straightforward module system to make code exchange between your project members as easy as possible. KM3Pipe already comes with several types of Pumps (the modules which act as a data-parser/reader) so it should be easy to find an example to implement your owns. As of version 1.2.3 you find Pumps based on popular formats like HDF5 (https://www.hdfgroup.org), ROOT (https://root.cern.ch) but also some very specialised project internal binary data formats, which on the other hand can act templates for your own ones. Just have a look at the io subpackage and of course the documention if you’re interested!
+Although it is mainly designed for the KM3NeT neutrino detectors, it can easily be extended to support any kind of data format. Feel free to get in touch if you’re looking for a small, versatile framework which provides a quite straightforward module system to make code exchange between your project members as easy as possible. KM3Pipe already comes with several types of Pumps (the modules which act as data-parsers/readers), so it should be easy to find an example to implement your own. As of version 6.8.0 you will find Pumps based on popular formats like HDF5 (https://www.hdfgroup.org) and ROOT (https://root.cern.ch), but also some very specialised project-internal binary data formats, which in turn can act as templates for your own ones. Just have a look at the io subpackage and of course the documentation if you’re interested!
 
 Read the docs at http://km3pipe.readthedocs.io (Currently outdated due to scipy compile issues)
diff --git a/docs/index.rst b/docs/index.rst
index fec898eda..f62fef2c7 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -11,6 +11,9 @@
 .. image:: https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat
     :target: http://km3pipe.readthedocs.io/en/latest/
 
+.. image:: https://zenodo.org/badge/24634697.svg
+    :target: https://zenodo.org/badge/latestdoi/24634697
+
 .. include:: summary.rst
 
 .. toctree::
diff --git a/docs/version.txt b/docs/version.txt
index 7e2a8fc8f..1b24c7745 100755
--- a/docs/version.txt
+++ b/docs/version.txt
@@ -1,2 +1,2 @@
-KM3Pipe 6.8.0
+KM3Pipe 6.8.1
 =============
diff --git a/km3pipe/__version__.py b/km3pipe/__version__.py
index ec5e1d130..9fa89cf15 100644
--- a/km3pipe/__version__.py
+++ b/km3pipe/__version__.py
@@ -28,7 +28,7 @@
 log = logging.getLogger(__name__)  # pylint: disable=C0103
 
-version_info = (6, 8, 0, 'final', 0)
+version_info = (6, 8, 1, 'final', 0)
 
 
 def _get_version(version_info):
diff --git a/km3pipe/io/aanet.py b/km3pipe/io/aanet.py
index 31b51d57c..bbdc5b38e 100644
--- a/km3pipe/io/aanet.py
+++ b/km3pipe/io/aanet.py
@@ -43,6 +43,8 @@ class AanetPump(Pump):
         ``'minidst', 'jevt_jgandalf', 'generic_track', 'ancient_recolns'``
     use_aart_sps_lib: bool, optional [default=False]
         Use Aart's local SPS copy (@ Lyon) via ``gSystem.Load``?
+    apply_zed_correction: bool, optional [default=False]
+        Correct the ~400 m z offset in MC tracks.
     """
 
     def __init__(self, **context):
@@ -55,6 +57,7 @@ def __init__(self, **context):
         self.format = self.get('aa_fmt')
         self.use_id = self.get('use_id') or False
         self.use_aart_sps_lib = self.get('use_aart_sps_lib') or False
+        self.apply_zed_correction = self.get('apply_zed_correction') or False
         if self.additional:
             self.id = self.get('id')
             self.return_without_match = self.get("return_without_match")
@@ -171,7 +174,7 @@ def blob_generator(self):
 
     def _read_event(self, event, filename):
         try:
-            if event.det_id <= 0:  # apply ZED correction
+            if self.apply_zed_correction:  # apply ZED correction (was: event.det_id <= 0)
                 for track in event.mc_trks:
                     track.pos.z += 405.93
         except AttributeError:
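For context, here is a minimal sketch of how the new pump option fits into the Pipeline/Pump/Module pattern described in the README above. Only apply_zed_correction comes from this patch; the file name and the PrintBlobKeys module are invented for illustration, and the remaining calls follow the usual KM3Pipe conventions (Pipeline, attach, drain), which may differ in detail for this version:

```python
import km3pipe as kp


class PrintBlobKeys(kp.Module):
    """Hypothetical user module: show what the pump put into each blob."""
    def process(self, blob):
        print(sorted(blob.keys()))
        return blob                    # pass the blob on to the next module


pipe = kp.Pipeline()
pipe.attach(kp.io.AanetPump,
            filename='events.aa.root',     # placeholder file name
            apply_zed_correction=True)     # shift MC track z positions by +405.93 m
pipe.attach(PrintBlobKeys)
pipe.drain()                               # process the file event by event
```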
diff --git a/km3pipe/utils/tohdf5.py b/km3pipe/utils/tohdf5.py
index 383a0d89c..0881aaf54 100644
--- a/km3pipe/utils/tohdf5.py
+++ b/km3pipe/utils/tohdf5.py
@@ -4,7 +4,7 @@
 Convert ROOT and EVT files to HDF5.
 
 Usage:
-    tohdf5 [-o OUTFILE] [-n EVENTS] [-e NROWS] [--aa-format=<fmt>] [--aa-lib=<lib.so>] FILE...
+    tohdf5 [-o OUTFILE] [-n EVENTS] [-e NROWS] [--correct-zed] [--aa-format=<fmt>] [--aa-lib=<lib.so>] FILE...
     tohdf5 [-o OUTFILE] [-n EVENTS] [-e NROWS] [-j] [-s] [-l] FILE...
     tohdf5 (-h | --help)
     tohdf5 --version
@@ -22,6 +22,7 @@
     -n EVENTS/RUNS              Number of events/runs.
     -o OUTFILE                  Output file.
     -s --with-summaryslices     Include summary slices [default: False]
+    --correct-zed               Correct offset in MC tracks (aanet) [default: False]
     -e --expected-rows NROWS    Approximate number of events.  Providing a
                                 rough estimate for this (100, 10000000, ...)
                                 will greatly improve reading/writing speed and
@@ -87,6 +88,7 @@ def main():
     aa_lib = args['--aa-lib']
     with_summaryslices = args['--with-summaryslices']
     with_timeslice_hits = args['--with-timeslice-hits']
+    correct_zed = args['--correct-zed']
     tohdf5(infiles, outfile, n,
@@ -96,4 +98,5 @@
            with_summaryslices=with_summaryslices,
            with_timeslice_hits=with_timeslice_hits,
            n_rows_expected=n_rows_expected,
+           apply_zed_correction=correct_zed,
            )
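Once a file has been converted with the new flag (for example `tohdf5 --correct-zed -o events.h5 events.aa.root`; file names are placeholders), the shift can be checked directly in the output. A rough sketch using pytables, the library KM3Pipe's HDF5 output is built on; the /mc_tracks node and the pos_z column are assumptions about the table layout and may be named differently:

```python
import tables as tb

# 'events.h5' is a placeholder; node and column names are assumptions
# about the KM3Pipe HDF5 layout.
with tb.open_file('events.h5', 'r') as f:
    pos_z = f.get_node('/mc_tracks').col('pos_z')
    # converted with --correct-zed, the MC track z positions are shifted by +405.93 m
    print(pos_z.min(), pos_z.mean(), pos_z.max())
```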