Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

gromacs+gsd/hoomd readers, general dist function, issues fixed #70

Merged
merged 51 commits into from
Nov 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
f7193c7
Merge branch 'refactor_tests' into gsd_reader
kai-luca Aug 16, 2024
10dc5f2
Merge branch 'issues_49_46' into gsd_reader
kai-luca Aug 16, 2024
bd629f1
WIP: Added test for GSDReader
kai-luca Aug 16, 2024
ed42526
WIP: added coordinates, velocities, uwcoordinate-calculation, type-ids
Aug 16, 2024
387a2e7
WIP: added quaternion orientations and started quaternion angmoms
Aug 19, 2024
dee697a
WIP: added diameter, mass, charge, body, moment of inertia
Aug 19, 2024
c5be59c
WIP: some changes
Aug 19, 2024
3049a52
gsd reader first working version
Aug 19, 2024
094d001
WIP: gsd reader angmoms implemented - check angmom from quaternion!!
Aug 20, 2024
d32d750
fixed angmom dataset not created (done in other branch)
Aug 20, 2024
d2063f5
angmom and omega = 0
Sep 9, 2024
67181d1
WIP: gromacs chemfiles - chemfiles very slow!
Oct 9, 2024
57c9f03
WIP: GSD reader basic features working (no angmom!)
Oct 9, 2024
b88b1de
WIP: fixed naming issue
Oct 11, 2024
e862dad
WIP: added quaternion operations
Oct 11, 2024
b966f27
WIP: angular momentum and angular velocity in principal frame
Oct 11, 2024
e734237
WIP: fixed orientation hoomd
Oct 17, 2024
de33fd5
WIP: fixed average_func times
Oct 17, 2024
122ee75
WIP: removed thetas from quaternions
Oct 17, 2024
0c9a3e5
WIP: made GROMACS molecule identification orders of magnitude faster
Oct 17, 2024
2525c13
fixes #60
Oct 17, 2024
49bbda0
WIP: fixing the ClusterGrowth mode="largest" issue
Oct 17, 2024
ec40876
fixes #59
Oct 17, 2024
24740b8
fixes #58
Oct 17, 2024
1336a73
reordered folding and shifting coordinates. fixes #65
Oct 18, 2024
2d6db15
this branch fixes #37 and #47
Oct 18, 2024
91a30bf
added example data for gromacs
Oct 18, 2024
3580a00
added example for hoomd/gsd and gromacs
Oct 18, 2024
2158833
added frame.data(list). fixes #35
Oct 18, 2024
452acd9
fixed #68
Oct 18, 2024
01e047a
fixed small issue with returned list in frame.data()
Oct 18, 2024
3dc1359
implemented general distribution function. fixes #57
Oct 18, 2024
9582f9a
added example with image to evaluate.Dist
Oct 18, 2024
7e7517d
renamed gsdreader to hoomdreader
Oct 18, 2024
201aae0
WIP: added tests
Oct 18, 2024
bd5c62a
WIP: fixed pbc.kdtree() box boundary.
Oct 18, 2024
de7e865
tests updated
Oct 21, 2024
3529d23
fixed rotation for omegas
Oct 21, 2024
7d8540c
example to general distribution function added
Oct 21, 2024
41f4d03
added chemfiles dependency
Oct 21, 2024
3212ed8
restored tests
Oct 21, 2024
3fd5720
fixed tests
Oct 21, 2024
9244cb6
GROMACSDIR added to tests
Oct 21, 2024
8312de2
reduced plot test duration
Oct 21, 2024
0947bfb
Merge pull request #75 from amepproject/release-1.0.x
hechtprojects Oct 22, 2024
97fe406
fixed angmom in hoomdreader
Oct 23, 2024
4b25edd
specified warning in traj.load
Oct 23, 2024
3bbc370
minor cleanup
Oct 23, 2024
ae28a6a
Added the radius keyword to the frames in the hoomd dataset. Maybe we…
kai-luca Nov 5, 2024
273183a
removed diameter from hoomdreader, using radius instead.
Nov 8, 2024
ee2471a
Merge branch 'main' into gromacs_reader
kay-ro Nov 13, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 22 additions & 8 deletions amep/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,8 @@ def check_path(path: str, extension: str) -> tuple[str, str]:
'lammps',
'h5amep',
'field',
'gsd'
'hoomd',
'gromacs'
]
# maximum RAM usage in GB per CPU (used for parallelized methods)
MAXMEM = 1
Expand Down Expand Up @@ -771,7 +772,7 @@ def keys(self) -> list:

return datakeys
def data(
self, *args, ptype: int | None = None, zerofill: bool = False,
self, *args: str | tuple[list[str], ...], ptype: int | None = None, zerofill: bool = False,
return_keys: bool = False) -> tuple[list, np.ndarray]:
r'''
Returns the entire data frame for all particles or for
Expand All @@ -784,12 +785,15 @@ def data(
that start with "name", "value[*]" returns all datasets with any
number of characters in between the square brackets i.e. "value[1]",
"value[2]", ..., "value[any text with any length]".

Duplicate keys are removed.

Parameters
----------
*args : str
*args : str | tuple[list[str], ...]
Parameter keys. One wildcard character asterisk can be used, see
note above.
note above. Either multiple strings or lists of strings are
allowed, a combination should not be used.
ptype : int | list, optional
Particle type. Is internally converted to a list and all matching
ptypes are returned. The default is None.
Expand All @@ -811,10 +815,21 @@ def data(
data = None
datakeys = []

# allow lists of keys as input
islist=False
listresult=[]
for arg in args:
if isinstance(arg, (list, np.ndarray)):
islist=True
listresult.append(self.data(*arg, ptype = ptype, zerofill = zerofill, return_keys = return_keys))
if islist:
if len(args)==1:
return listresult[0]
return listresult

# return all data if no arguments are given
if len(args)==0:
args = self.keys

else:
# Transform list of all given keys by allowing semi-wildcard matches
# One asterisk * is allowed.
Expand All @@ -836,11 +851,10 @@ def data(
found_key = True
if not found_key:
raise KeyError(
f"""The key {arg} does not exist in the frame,
returning no data!"""
f"The key \"{arg}\" does not exist in the frame, returning no data!"
)
# remove duplicates
args = np.unique(extended_keys)
args = np.array(extended_keys)[np.sort(np.unique(extended_keys, return_index=True)[1])]

# loop through given keys
for i,key in enumerate(args):
Expand Down
219 changes: 212 additions & 7 deletions amep/evaluate.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
# IMPORT MODULES
# =============================================================================
from packaging.version import Version
from collections.abc import Callable
import warnings
import numpy as np

Expand Down Expand Up @@ -3072,6 +3073,210 @@ def indices(self):
return self.__indices



class Dist(BaseEvaluation):
    """General time-averaged distribution of user-selected data keys."""

    def __init__(
            self, traj: ParticleTrajectory | FieldTrajectory,
            keys: str | list[str], func: Callable | None = None,
            skip: float = 0.0, nav: int = 10, nbins: int = 50,
            ptype: float | None = None, ftype: str | None = None,
            logbins: bool = False, xmin: float | None = None,
            xmax: float | None = None, **kwargs):
        r'''
        Calculate the distribution (histogram) of one or more
        user-defined data keys, optionally transformed by ``func``,
        averaged over several frames (= time average).

        Parameters
        ----------
        traj : ParticleTrajectory or FieldTrajectory
            Trajectory object.
        keys : str or list of str
            Data key(s) passed to ``frame.data`` to select the values
            whose distribution is computed (e.g. ``"x"`` or ``"v*"``).
        func : Callable or None, optional
            Function applied to the raw data of each frame before
            histogramming (e.g. ``np.linalg.norm``). Extra keyword
            arguments are forwarded via ``**kwargs``. If None, the data
            is used unchanged. The default is None.
        skip : float, optional
            Skip this fraction at the beginning of the trajectory.
            The default is 0.0.
        nav : int, optional
            Number of frames to use for the average. The default is 10.
        nbins : int, optional
            Number of histogram bins. The default is 50.
        ptype : float, optional
            Particle type (only used for particle trajectories).
            The default is None.
        ftype : str, optional
            Field type (only used for field trajectories).
            The default is None.
        logbins : bool, optional
            Use logarithmically spaced bins. The default is False.
        xmin : float or None, optional
            Minimum value for the histogram. If None, the minimum of
            the (transformed) data of the last frame is used.
        xmax : float or None, optional
            Maximum value for the histogram. If None, the maximum of
            the (transformed) data of the last frame is used.
        **kwargs
            Forwarded to ``func``.

        Returns
        -------
        None

        Examples
        --------
        >>> import amep
        >>> import numpy as np
        >>> traj = amep.load.traj("./data/lammps.h5amep")
        >>> # distribution of the absolute velocity
        >>> dist = amep.evaluate.Dist(
        ...     traj, "v*", func=np.linalg.norm, axis=1,
        ...     skip=0.5, logbins=True)
        >>> # save results in hdf5 format
        >>> dist.save("./eval/dist-eval.h5", name="velocitydistribution")

        >>> fig, axs = amep.plot.new()
        >>> axs.plot(dist.x, dist.xdist)
        >>> axs.set_xlabel("Velocity")
        >>> axs.set_ylabel("P(Velocity)")
        >>> axs.semilogx()

        >>> # more examples:
        >>> # distribution of the x-position
        >>> dist = amep.evaluate.Dist(traj, "x", skip=0.5, logbins=True)
        >>> # distribution of the angular velocity
        >>> dist = amep.evaluate.Dist(
        ...     traj, "omega*", func=np.linalg.norm, axis=1,
        ...     skip=0.5, logbins=True)

        .. image:: /_static/images/evaluate/evaluate-Dist.png
          :width: 400
          :align: center

        '''
        super(Dist, self).__init__()

        # identity transform if no function is supplied
        if func is None:
            func = lambda x: x

        self.name = "dist"

        self.__traj = traj
        self.__keys = keys
        self.__skip = skip
        self.__nav = nav
        self.__nbins = nbins
        self.__ptype = ptype
        # BUG FIX: ftype was never stored before, so accessing
        # self.__ftype below raised AttributeError for FieldTrajectory.
        self.__ftype = ftype
        self.__xmin = xmin
        self.__xmax = xmax
        self.__func = func
        self.__logbins = logbins
        self.__kwargs = kwargs

        # derive missing histogram limits from the last frame
        if self.__xmin is None or self.__xmax is None:
            minmaxdata = self.__func(self.__get_data(-1), **self.__kwargs)
            if self.__xmin is None:
                self.__xmin = np.min(minmaxdata)
            if self.__xmax is None:
                self.__xmax = np.max(minmaxdata)

        self.__frames, res, self.__indices = average_func(
            self.__compute, np.arange(self.__traj.nframes),
            skip=self.__skip, nr=self.__nav, indices=True)

        self.__times = self.__traj.times[self.__indices]
        self.__xdist = res[0]
        self.__x = res[1]

    def __get_data(self, ind):
        r'''
        Load the requested keys from frame ``ind``, dispatching on the
        trajectory type (fields select by ftype, particles by ptype).

        Parameters
        ----------
        ind : int
            Frame index.

        Returns
        -------
        np.ndarray
            Raw (untransformed) data of the frame.
        '''
        if isinstance(self.__traj, FieldTrajectory):
            return self.__traj[ind].data(self.__keys, ftype=self.__ftype)
        return self.__traj[ind].data(self.__keys, ptype=self.__ptype)

    def __compute(self, ind):
        r'''
        Calculation for a single frame.

        Parameters
        ----------
        ind : int
            Frame index.

        Returns
        -------
        hist : np.ndarray
            Histogram.
        bins : np.ndarray
            Bin positions. Same shape as hist.
        '''
        # BUG FIX: previously always used ptype here, which is wrong
        # for field trajectories; __get_data dispatches correctly.
        data = self.__func(self.__get_data(ind), **self.__kwargs)

        keyhist, keybins = distribution(
            data, nbins=self.__nbins, xmin=self.__xmin,
            xmax=self.__xmax, logbins=self.__logbins)

        return keyhist, keybins

    @property
    def xdist(self):
        r'''
        Time-averaged distribution of the selected data.

        Returns
        -------
        np.ndarray
            Time-averaged histogram values.
        '''
        return self.__xdist

    @property
    def x(self):
        r'''
        Bin positions of the distribution.

        Returns
        -------
        np.ndarray
            Bin positions.

        '''
        return self.__x

    @property
    def frames(self):
        r'''
        Distribution for each evaluated frame.

        Returns
        -------
        np.ndarray
            Per-frame histograms.

        '''
        return self.__frames

    @property
    def times(self):
        r'''
        Times at which the distribution is evaluated.

        Returns
        -------
        np.ndarray
            Times of the evaluated frames.

        '''
        return self.__times

    @property
    def indices(self):
        r'''
        Indices of all frames for which the distribution has been
        evaluated.

        Returns
        -------
        np.ndarray
            Frame indices.

        '''
        return self.__indices



# =============================================================================
# CLUSTER ANALYSIS
# =============================================================================
Expand Down Expand Up @@ -3517,9 +3722,9 @@ def __init__(
>>> axs.plot(clg.times, clg.frames, label="largest")
>>> clg = amep.evaluate.ClusterGrowth(traj, mode="mean")
>>> axs.plot(clg.times, clg.frames, label="mean")
>>> clg = amep.evaluate.ClusterGrowth(ptraj, mode="mean", min_size=20)
>>> clg = amep.evaluate.ClusterGrowth(traj, mode="mean", min_size=20)
>>> axs.plot(clg.times, clg.frames, label="mean min 20")
>>> clg = amep.evaluate.ClusterGrowth(ptraj, mode="weighted mean")
>>> clg = amep.evaluate.ClusterGrowth(traj, mode="weighted mean")
>>> axs.plot(clg.times, clg.frames, label="weighted mean")
>>> axs.loglog()
>>> axs.legend()
Expand Down Expand Up @@ -3564,7 +3769,7 @@ def __init__(

if nav is None:
nav = self.__traj.nframes

# check mode
if self.__mode not in ["largest", "mean", "weighted mean"]:
raise ValueError(
Expand Down Expand Up @@ -3633,8 +3838,8 @@ def __compute_particles(self, frame) -> int | float:

# if no clusters are detected
if len(values) == 0:
values = 0.0
weights = 1.0
values = [0.0]
weights = [1.0]
else:
weights = values

Expand Down Expand Up @@ -3692,8 +3897,8 @@ def __compute_fields(self, frame):

# case if there is no cluster
if len(values) == 0:
values = 0.0
weights = 1.0
values = [0.0]
weights = [1.0]
else:
weights = values

Expand Down
Loading
Loading