Merge pull request #179 from bbfrederick/sharedmemupdate
First crack at new shared memory routines
bbfrederick authored Nov 3, 2024
2 parents b4ec721 + 119135e commit 8089f34
Showing 7 changed files with 158 additions and 121 deletions.
67 changes: 20 additions & 47 deletions rapidtide/tests/test_delayestimation.py
@@ -30,6 +30,7 @@
 import rapidtide.peakeval as tide_peakeval
 import rapidtide.resample as tide_resample
 import rapidtide.simfuncfit as tide_simfuncfit
+import rapidtide.util as tide_util

 try:
     import mkl
@@ -39,34 +39,6 @@
     mklexists = False


-def numpy2shared(inarray, thetype):
-    thesize = inarray.size
-    theshape = inarray.shape
-    if thetype == np.float64:
-        inarray_shared = mp.RawArray("d", inarray.reshape(thesize))
-    else:
-        inarray_shared = mp.RawArray("f", inarray.reshape(thesize))
-    inarray = np.frombuffer(inarray_shared, dtype=thetype, count=thesize)
-    inarray.shape = theshape
-    return inarray
-
-
-def allocshared(theshape, thetype):
-    thesize = int(1)
-    if not isinstance(theshape, (list, tuple)):
-        thesize = theshape
-    else:
-        for element in theshape:
-            thesize *= int(element)
-    if thetype == np.float64:
-        outarray_shared = mp.RawArray("d", thesize)
-    else:
-        outarray_shared = mp.RawArray("f", thesize)
-    outarray = np.frombuffer(outarray_shared, dtype=thetype, count=thesize)
-    outarray.shape = theshape
-    return outarray, outarray_shared, theshape
-
-
 def multisine(timepoints, parameterlist):
     output = timepoints * 0.0
     for element in parameterlist:
@@ -144,7 +117,7 @@ def test_delayestimation(displayplots=False, debug=False):
         plt.show()

     threshval = pedestal / 4.0
-    waveforms = numpy2shared(waveforms, np.float64)
+    waveforms, waveforms_shm = tide_util.numpy2shared(waveforms, np.float64)

     referencetc = tide_resample.doresample(
         timepoints, waveforms[refnum, :], oversamptimepoints, method=interptype
@@ -170,8 +143,8 @@
     dummy, trimmedcorrscale, dummy = theCorrelator.getfunction()
     corroutlen = np.shape(trimmedcorrscale)[0]
     internalvalidcorrshape = (numlocs, corroutlen)
-    corrout, dummy, dummy = allocshared(internalvalidcorrshape, np.float64)
-    meanval, dummy, dummy = allocshared((numlocs), np.float64)
+    corrout, corrout_shm = tide_util.allocshared(internalvalidcorrshape, np.float64)
+    meanval, meanval_shm = tide_util.allocshared((numlocs), np.float64)
     if debug:
         print("corrout shape:", corrout.shape)
         print("theCorrelator: corroutlen=", corroutlen)
@@ -210,21 +183,21 @@
         peakfittype=peakfittype,
     )

-    lagtc, dummy, dummy = allocshared(waveforms.shape, np.float64)
-    fitmask, dummy, dummy = allocshared((numlocs), "uint16")
-    failreason, dummy, dummy = allocshared((numlocs), "uint32")
-    lagtimes, dummy, dummy = allocshared((numlocs), np.float64)
-    lagstrengths, dummy, dummy = allocshared((numlocs), np.float64)
-    lagsigma, dummy, dummy = allocshared((numlocs), np.float64)
-    gaussout, dummy, dummy = allocshared(internalvalidcorrshape, np.float64)
-    windowout, dummy, dummy = allocshared(internalvalidcorrshape, np.float64)
-    rvalue, dummy, dummy = allocshared((numlocs), np.float64)
-    r2value, dummy, dummy = allocshared((numlocs), np.float64)
-    fitcoff, dummy, dummy = allocshared((waveforms.shape), np.float64)
-    fitNorm, dummy, dummy = allocshared((waveforms.shape), np.float64)
-    R2, dummy, dummy = allocshared((numlocs), np.float64)
-    movingsignal, dummy, dummy = allocshared(waveforms.shape, np.float64)
-    filtereddata, dummy, dummy = allocshared(waveforms.shape, np.float64)
+    lagtc, lagtc_shm = tide_util.allocshared(waveforms.shape, np.float64)
+    fitmask, fitmask_shm = tide_util.allocshared((numlocs), "uint16")
+    failreason, failreason_shm = tide_util.allocshared((numlocs), "uint32")
+    lagtimes, lagtimes_shm = tide_util.allocshared((numlocs), np.float64)
+    lagstrengths, lagstrengths_shm = tide_util.allocshared((numlocs), np.float64)
+    lagsigma, lagsigma_shm = tide_util.allocshared((numlocs), np.float64)
+    gaussout, gaussout_shm = tide_util.allocshared(internalvalidcorrshape, np.float64)
+    windowout, windowout_shm = tide_util.allocshared(internalvalidcorrshape, np.float64)
+    rvalue, rvalue_shm = tide_util.allocshared((numlocs), np.float64)
+    r2value, r2value_shm = tide_util.allocshared((numlocs), np.float64)
+    fitcoff, fitcoff_shm = tide_util.allocshared((waveforms.shape), np.float64)
+    fitNorm, fitNorm_shm = tide_util.allocshared((waveforms.shape), np.float64)
+    R2, R2_shm = tide_util.allocshared((numlocs), np.float64)
+    movingsignal, movingsignal_shm = tide_util.allocshared(waveforms.shape, np.float64)
+    filtereddata, filtereddata_shm = tide_util.allocshared(waveforms.shape, np.float64)

     for nprocs in [4, 1]:
         # call correlationpass
@@ -357,7 +330,7 @@
         ax.legend()
         plt.show()

-    filteredwaveforms, dummy, dummy = allocshared(waveforms.shape, np.float64)
+    filteredwaveforms, filteredwaveforms_shm = tide_util.allocshared(waveforms.shape, np.float64)
     for i in range(numlocs):
         filteredwaveforms[i, :] = theprefilter.apply(Fs, waveforms[i, :])
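Note on the pattern above: every call site now unpacks two values, the NumPy view and the backing SharedMemory handle, where the old helpers returned the array alone (numpy2shared) or an (array, rawarray, shape) triple (allocshared). A minimal sketch of the new calling convention, separate from the commit itself (shapes and variable names are arbitrary):

    import numpy as np

    import rapidtide.util as tide_util

    # allocate a fresh shared float64 array; keep the handle for later cleanup
    corrout, corrout_shm = tide_util.allocshared((100, 50), np.float64)

    # copy an existing array into shared memory, casting to the requested dtype
    waveforms = np.random.normal(size=(100, 200))
    waveforms, waveforms_shm = tide_util.numpy2shared(waveforms, np.float64)

    # the returned views behave like ordinary ndarrays
    corrout[:] = 0.0
    rowmeans = waveforms.mean(axis=1)

    # release the blocks once no process needs them
    tide_util.cleanup_shm(corrout_shm)
    tide_util.cleanup_shm(waveforms_shm)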
60 changes: 60 additions & 0 deletions rapidtide/tests/test_sharedmem.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016-2024 Blaise Frederick
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+
+import rapidtide.util as tide_util
+
+
+def test_numpy2shared(debug=False):
+    vectorlen = 1000
+    for intype in [np.float32, np.float64]:
+        sourcevector = np.random.normal(size=vectorlen).astype(intype)
+        if debug:
+            print(f"{intype=}, {sourcevector.size=}, {sourcevector.dtype=}")
+        for outtype in [np.float32, np.float64]:
+
+            destvector, shm = tide_util.numpy2shared(sourcevector, outtype)
+            if debug:
+                print(f"\t{outtype=}, {destvector.size=}, {destvector.dtype=}")
+
+            # check everything
+            assert destvector.dtype == outtype
+            assert destvector.size == sourcevector.size
+            np.testing.assert_almost_equal(sourcevector, destvector, 3)
+
+            # clean up
+            tide_util.cleanup_shm(shm)
+
+
+def test_allocshared(debug=False):
+    datashape = (10, 10, 10)
+    for outtype in [np.float32, np.float64]:
+        destarray, shm = tide_util.allocshared(datashape, outtype)
+        if debug:
+            print(f"{outtype=}, {destarray.size=}, {destarray.dtype=}")
+
+        # check everything
+        assert destarray.dtype == outtype
+        assert destarray.size == np.prod(datashape)
+
+        # clean up if needed
+        tide_util.cleanup_shm(shm)
+
+
+if __name__ == "__main__":
+    test_numpy2shared(debug=True)
+    test_allocshared(debug=True)
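One behavioral difference these tests respect: unlike the old RawArray buffers, which Python reclaims when the last reference dies, SharedMemory blocks are named system objects that generally persist until explicitly unlinked, hence the cleanup_shm() call after every allocation. (The decimal=3 tolerance in test_numpy2shared leaves room for the float64 to float32 round trip.) A sketch of a defensive pattern, assuming a try/finally wrapper rather than taking it from this commit:

    import numpy as np

    import rapidtide.util as tide_util

    destarray, shm = tide_util.allocshared((10, 10, 10), np.float64)
    try:
        destarray[:] = 42.0  # work with the shared array as usual
    finally:
        tide_util.cleanup_shm(shm)  # always close() and unlink() the backing block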
47 changes: 23 additions & 24 deletions rapidtide/util.py
Expand Up @@ -18,7 +18,6 @@
#
import bisect
import logging
import multiprocessing as mp
import os
import platform
import resource
Expand All @@ -27,6 +26,7 @@
import sys
import time
from datetime import datetime
from multiprocessing import RawArray, shared_memory

import matplotlib.pyplot as plt
import numpy as np
Expand Down Expand Up @@ -1003,29 +1003,28 @@ def comparehappyruns(root1, root2, debug=False):


# shared memory routines
def numpy2shared(inarray, thetype):
thesize = inarray.size
theshape = inarray.shape
if thetype == np.float64:
inarray_shared = mp.RawArray("d", inarray.reshape(thesize))
else:
inarray_shared = mp.RawArray("f", inarray.reshape(thesize))
inarray = np.frombuffer(inarray_shared, dtype=thetype, count=thesize)
inarray.shape = theshape
return inarray
def numpy2shared(inarray, theouttype):
# Create a shared memory block to store the array data
outnbytes = np.dtype(theouttype).itemsize * inarray.size
shm = shared_memory.SharedMemory(create=True, size=outnbytes)
inarray_shared = np.ndarray(inarray.shape, dtype=theouttype, buffer=shm.buf)
np.copyto(inarray_shared, inarray) # Copy data to shared memory array
return inarray_shared, shm # Return both the array and the shared memory object


def allocshared(theshape, thetype):
thesize = int(1)
if not isinstance(theshape, (list, tuple)):
thesize = theshape
else:
for element in theshape:
thesize *= int(element)
if thetype == np.float64:
outarray_shared = mp.RawArray("d", thesize)
else:
outarray_shared = mp.RawArray("f", thesize)
outarray = np.frombuffer(outarray_shared, dtype=thetype, count=thesize)
outarray.shape = theshape
return outarray, outarray_shared, theshape
# Calculate size based on shape
thesize = np.prod(theshape)
# Determine the data type size
dtype_size = np.dtype(thetype).itemsize
# Create a shared memory block of the required size
shm = shared_memory.SharedMemory(create=True, size=thesize * dtype_size)
outarray = np.ndarray(theshape, dtype=thetype, buffer=shm.buf)
return outarray, shm # Return both the array and the shared memory object


def cleanup_shm(shm):
# Cleanup
if shm is not None:
shm.close()
shm.unlink()
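What returning the SharedMemory handle buys: the block has a name, so a worker process can map the same memory without copying. A minimal sketch of the attach side, using standard multiprocessing.shared_memory calls rather than any rapidtide API; the creator has to hand the worker the block's name, shape, and dtype, and the attach() helper here is hypothetical:

    import numpy as np
    from multiprocessing import shared_memory

    def attach(name, shape, dtype):
        # map a block created elsewhere, e.g. by tide_util.allocshared()
        shm = shared_memory.SharedMemory(name=name)
        view = np.ndarray(shape, dtype=dtype, buffer=shm.buf)
        return view, shm

    # worker side: writes are visible to every process attached to the block
    # view, shm = attach(blockname, (100, 50), np.float64)
    # view[0, :] = 1.0
    # shm.close()  # detach only; the creating process unlinks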
