
Commit

…rrations into develop
dmilkie committed Oct 16, 2023
2 parents 583cc02 + ccec2c3 commit 6196607
Showing 11 changed files with 350 additions and 43 deletions.
16 changes: 14 additions & 2 deletions .github/workflows/docker_action.yml
@@ -128,10 +128,22 @@ jobs:
cache-to: type=gha,mode=max
load: true

- name: Test
- name: Test IO
run: |
docker run --rm --gpus all ${{ steps.meta.outputs.tags }} "~/miniconda3/envs/ml/bin/python -m pytest -vvv --disable-warnings tests/test_io.py"
- name: Test embeddings
run: |
docker run --rm --gpus all ${{ steps.meta.outputs.tags }} "~/miniconda3/envs/ml/bin/python -m pytest -vvv --disable-warnings tests/test_embeddings.py"
- name: Test AO
run: |
docker run --rm --gpus all ${{ steps.meta.outputs.tags }} "~/miniconda3/envs/ml/bin/python -m pytest -vvv --disable-warnings tests/test_ao.py"
- name: Test synthetic datasets
run: |
docker run --rm --gpus all ${{ steps.meta.outputs.tags }} "~/miniconda3/envs/ml/bin/python -m pytest -vvv --disable-warnings tests/test_datasets.py"
- name: Build and push Docker image
id: build-and-push
uses: docker/build-push-action@v5
17 changes: 17 additions & 0 deletions .run/test_datasets.run.xml
@@ -0,0 +1,17 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="test_datasets" type="tests" factoryName="Autodetect">
<module name="opticalaberrations" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="_new_additionalArguments" value="&quot;&quot;" />
<option name="_new_target" value="&quot;test_datasets&quot;" />
<option name="_new_targetType" value="&quot;PYTHON&quot;" />
<method v="2" />
</configuration>
</component>
17 changes: 17 additions & 0 deletions .run/test_embeddings.run.xml
@@ -0,0 +1,17 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="test_embeddings" type="tests" factoryName="Autodetect">
<module name="opticalaberrations" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="_new_additionalArguments" value="&quot;&quot;" />
<option name="_new_target" value="&quot;test_embeddings&quot;" />
<option name="_new_targetType" value="&quot;PYTHON&quot;" />
<method v="2" />
</configuration>
</component>
4 changes: 3 additions & 1 deletion .vscode/settings.json
@@ -3,5 +3,7 @@
"tests"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true
"python.testing.pytestEnabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.analysis.autoImportCompletions": true
}
40 changes: 34 additions & 6 deletions src/experimental.py
@@ -162,7 +162,6 @@ def generate_embeddings(

samplepsfgen = SyntheticPSF(
psf_type=modelpsfgen.psf_type,
lls_excitation_profile=modelpsfgen.lls_excitation_profile,
psf_shape=sample.shape,
n_modes=model.output_shape[1],
lam_detection=wavelength,
@@ -1570,7 +1569,7 @@ def create_consensus_map(
xtiles: int,
new_zernikes_path: Path,
new_stdevs_path: Path,
consensus_stacks_path: Path,
consensus_stacks_path: Path, # .csv of optimized_stack_id
):
"""
1. Build a consensus isoplanatic map of the wavefront aberrations for each tile.
@@ -1718,7 +1717,6 @@ def create_consensus_map(
consensus_stacks.to_csv(consensus_stacks_path)
return optimized_volume, volume_used


@profile
def combine_tiles(
corrected_actuators_csv: Path,
@@ -1803,10 +1801,17 @@ def combine_tiles(
stack_preds = [] # build a list of prediction dataframes for each stack.
stack_stdevs = [] # build a list of standard deviations dataframes for each stack.
correction_scans = []
correction_scans_b = []
correction_scan_paths = []

for t, path in tqdm(enumerate(corrections), desc='Loading corrections', file=sys.stdout, unit=' image files'):
correction_base_path = str(path).replace('_tiles_predictions_aggregated_p2v_error.tif', '')
correction_scan_paths.append(Path(f'{correction_base_path}.tif'))
correction_scans.append(backend.load_sample(f'{correction_base_path}.tif'))
try:
correction_scans_b.append(backend.load_sample(utils.convert_path_to_other_cam(Path(f'{correction_base_path}.tif'))))
except Exception:
pass
stack_preds.append(
pd.read_csv(
f'{correction_base_path}_tiles_predictions_aggregated_clusters.csv',
@@ -1830,6 +1835,28 @@
new_zernikes_path = Path(f"{output_base_path}_{postfix}_tiles_predictions.csv")
new_stdevs_path = Path(f"{output_base_path}_{postfix}_tiles_stdevs.csv")

# optimized_volume for Cam B
if len(correction_scans_b) > 0:
optimized_volume_b, volume_used = create_consensus_map(
org_cluster_map=org_cluster_map,
correction_scans=correction_scans_b,
stack_preds=stack_preds,
stack_stdevs=stack_stdevs,
zernikes_on_mirror=zernikes_on_mirror,
zernike_indices=zernike_indices,
window_size=predictions_settings['window_size'],
ztiles=ztiles,
ytiles=ytiles,
xtiles=xtiles,
new_zernikes_path=new_zernikes_path,
new_stdevs_path=new_stdevs_path,
consensus_stacks_path=consensus_stacks_path,
)
output_base_path_b = utils.convert_path_to_other_cam(correction_scan_paths[0])
imwrite(f"{output_base_path_b.with_suffix('')}_optimized.tif", optimized_volume_b.astype(np.float32))
logger.info(f"{output_base_path_b.with_suffix('')}_optimized.tif")

# optimized_volume for Cam A
optimized_volume, volume_used = create_consensus_map(
org_cluster_map=org_cluster_map,
correction_scans=correction_scans,
@@ -1845,9 +1872,13 @@
new_stdevs_path=new_stdevs_path,
consensus_stacks_path=consensus_stacks_path,
)
imwrite(f"{output_base_path}_{postfix}_volume_used.tif", volume_used.astype(np.uint16))
imwrite(f"{output_base_path}_optimized.tif", optimized_volume.astype(np.float32))
logger.info(f"{output_base_path}_optimized.tif")

# aggregate consensus maps
imwrite(f"{output_base_path}_{postfix}.tif", correction_scans[0].astype(np.float32))
logger.info(f"{output_base_path}_{postfix}.tif")
with Path(f"{output_base_path}_{postfix}_tiles_predictions_settings.json").open('w') as f:
ujson.dump(
predictions_settings,
@@ -1874,9 +1905,6 @@
)

# aggregate optimized maps

imwrite(f"{output_base_path}_{postfix}_volume_used.tif", volume_used.astype(np.uint16))
imwrite(f"{output_base_path}_optimized.tif", optimized_volume.astype(np.float32))
with Path(f"{output_base_path}_optimized_tiles_predictions_settings.json").open('w') as f:
ujson.dump(
predictions_settings,
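
For context on the Cam B branch added above: when a correction scan for the second camera can be loaded, the commit builds a second optimized volume from those scans using the same consensus map. The path mapping relies on the repo's utils.convert_path_to_other_cam, whose implementation is not shown in this diff; the sketch below is a purely hypothetical illustration of such a filename swap (the helper name and the CamA/CamB token logic are assumptions, not the repo's code).

from pathlib import Path

def convert_path_to_other_cam_sketch(path: Path) -> Path:
    # Hypothetical illustration only -- the real utils.convert_path_to_other_cam may differ.
    # Swap the camera token in the filename (CamA <-> CamB) so the scan that pairs with a
    # given camera's scan can be located from its path.
    name = path.name
    if 'CamA' in name:
        name = name.replace('CamA', 'CamB')
    elif 'CamB' in name:
        name = name.replace('CamB', 'CamA')
    return path.with_name(name)
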
62 changes: 47 additions & 15 deletions src/multipoint_dataset.py
@@ -112,14 +112,14 @@ def beads(
photons: int = 1,
object_size: Optional[int] = 0,
num_objs: int = 1,
fill_radius: float = .35,
fill_radius: float = .75,  # 0.75 places beads roughly just inside the Tukey window
):
"""
Args:
image_shape: image size
object_size: bead size (0 for diffraction-limited beads)
num_objs: number of beads
fill_radius: (0 for a single bead at the center of the image)
fill_radius: fractional fill radius (0 for a single bead at the center of the image)
"""
np.random.seed(os.getpid()+np.random.randint(low=0, high=10**6))
reference = np.zeros(image_shape)
@@ -138,13 +138,20 @@
),
).astype(np.float32) * photons
else:
# object_size == 0: diffraction-limited beads
if fill_radius > 0:
# sample bead positions uniformly over a disk (uniform density in polar coordinates)
r = np.sqrt(np.random.random(1)) * fill_radius * (image_shape[2] - 1) * 0.5
theta = np.random.random(1) * 2 * np.pi
x = r * np.cos(theta) + (image_shape[2] - 1) * 0.5
y = r * np.sin(theta) + (image_shape[1] - 1) * 0.5
reference[
rng.integers(int(image_shape[0] * (.5 - fill_radius)), int(image_shape[0] * (.5 + fill_radius))),
rng.integers(int(image_shape[1] * (.5 - fill_radius)), int(image_shape[1] * (.5 + fill_radius))),
rng.integers(int(image_shape[2] * (.5 - fill_radius)), int(image_shape[2] * (.5 + fill_radius))),
rng.integers(5, int(image_shape[0] - 5)),
np.round(y).astype(np.int32),
np.round(x).astype(np.int32)
] = photons
else:
# bead at center
reference[image_shape[0] // 2, image_shape[1] // 2, image_shape[2] // 2] = photons

return reference
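
The square root on the uniform radius sample above is what makes the bead positions uniform over the disk instead of clustering near the center: disk area grows with r squared, so r must be drawn as the square root of a uniform variate. A minimal standalone sketch of the same sampling, assuming only numpy (the helper name is illustrative):

import numpy as np

def sample_uniform_disk(n, fill_radius, image_shape):
    # Uniform-in-disk sampling: sqrt of a uniform variate for the radius compensates for
    # the fact that annulus area grows linearly with r, so points land uniformly in area.
    r = np.sqrt(np.random.random(n)) * fill_radius * (image_shape[2] - 1) * 0.5
    theta = np.random.random(n) * 2 * np.pi
    x = r * np.cos(theta) + (image_shape[2] - 1) * 0.5
    y = r * np.sin(theta) + (image_shape[1] - 1) * 0.5
    return np.round(y).astype(np.int32), np.round(x).astype(np.int32)
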
@@ -292,6 +299,8 @@ def sim(
psf_type=gen.psf_type,
)

return inputs


def create_synthetic_sample(
filename: str,
@@ -395,16 +404,37 @@
outdir = outdir / f"npoints_{npoints}"
outdir.mkdir(exist_ok=True, parents=True)

try: # check if file already exists and not corrupted
for e in embedding_option:
path = Path(f"{outdir/e}/{filename}")

with open(path.with_suffix('.json')) as f:
ujson.load(f)

with TiffFile(path.with_suffix('.tif')) as tif:
tif.asarray()
except Exception as e:
if emb:
try: # check if file already exists and not corrupted
for e in embedding_option:
path = Path(f"{outdir/e}/{filename}")

with open(path.with_suffix('.json')) as f:
ujson.load(f)

with TiffFile(path.with_suffix('.tif')) as tif:
tif.asarray()
except Exception as e:
sim(
filename=filename,
reference=reference,
model_psf_shape=reference.shape,
outdir=outdir,
phi=phi,
gen=gen,
upsampled_gen=upsampled_gen,
npoints=npoints,
photons=photons,
emb=emb,
embedding_option=embedding_option,
random_crop=random_crop,
noise=noise,
normalize=normalize,
alpha_val=alpha_val,
phi_val=phi_val,
lls_defocus_offset=lls_defocus_offset,
)
else:
sim(
filename=filename,
reference=reference,
@@ -425,6 +455,8 @@
lls_defocus_offset=lls_defocus_offset,
)

return reference
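
The emb branch above only regenerates a sample when its cached outputs are missing or corrupted: the JSON sidecar must parse and the TIFF must decode before the existing files are reused. A hedged sketch of that check as a standalone predicate (the name sample_is_intact is illustrative, not part of the repo):

from pathlib import Path
import ujson
from tifffile import TiffFile

def sample_is_intact(path: Path) -> bool:
    # Illustrative helper mirroring the try/except in create_synthetic_sample:
    # a sample counts as reusable only if its .json parses and its .tif decodes.
    try:
        with open(path.with_suffix('.json')) as f:
            ujson.load(f)
        with TiffFile(path.with_suffix('.tif')) as tif:
            tif.asarray()
        return True
    except Exception:
        return False
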


def parse_args(args):
parser = cli.argparser()
4 changes: 3 additions & 1 deletion src/psf_dataset.py
@@ -155,6 +155,8 @@ def sim(
psf_type=gen.psf_type,
)

return inputs


def create_synthetic_sample(
filename: str,
@@ -189,7 +191,7 @@

outdir.mkdir(exist_ok=True, parents=True)

sim(
return sim(
filename=filename,
outdir=outdir,
gen=gen,
44 changes: 27 additions & 17 deletions src/summarize.py
@@ -75,34 +75,44 @@ def concat_U16_tiffs(source_files=list([]), dst: Path = None, ch_two=None, drop_
print(f"Saved:\n{dst.resolve()}\n")


folder = Path(r'U:\Data\TestsForThayer\20231006_fish_arp_mNG\exp1_tailend\rotated')
folder = Path(r'U:\Data\TestsForThayer\20231013_fish\exp1_notochord\rotated')
cam_A = 'CamA'
cam_B = 'CamB'

before_files = folder.glob(f'*{cam_A}*stack0000_*00??t.tif')
optimized_files = folder.glob(f'*{cam_A}*stack0000_*00??*optimized.tif')
vol_used_files = folder.glob(f'*{cam_A}*stack0000_*00??*volume_used.tif')
before_files = list(folder.glob(f'*{cam_A}*stack0000_*00??t.tif'))
before_files_b = list(folder.glob(f'*{cam_B}*stack0000_*00??t.tif'))
optimized_files = list(folder.glob(f'*{cam_A}*stack0000_*00??*optimized.tif'))
optimized_files_b = list(folder.glob(f'*{cam_B}*stack0000_*00??*optimized.tif'))
vol_used_files = list(folder.glob(f'*{cam_A}*stack0000_*00??*volume_used.tif'))
patterns_to_drop = list(['after_three'])

dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_before_hyperstack.tif")

dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_before_hyperstack_{cam_A}.tif")
concat_U16_tiffs(source_files=before_files, dst=dst, drop_patterns=patterns_to_drop)
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_before_hyperstack_{cam_B}.tif")
concat_U16_tiffs(source_files=before_files_b, dst=dst, drop_patterns=patterns_to_drop)

before_files = folder.glob(f'*{cam_A}*stack0000_*00??t.tif')
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_before_vs_optimized_hyperstack.tif")
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_before_vs_optimized_hyperstack_{cam_A}.tif")
concat_U16_tiffs(source_files=before_files, dst=dst, drop_patterns=patterns_to_drop, ch_two=optimized_files)
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_before_vs_optimized_hyperstack_{cam_B}.tif")
concat_U16_tiffs(source_files=before_files_b, dst=dst, drop_patterns=patterns_to_drop, ch_two=optimized_files_b)

dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_optimized_hyperstack.tif")
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_optimized_hyperstack_{cam_A}.tif")
concat_U16_tiffs(source_files=optimized_files, dst=dst, drop_patterns=patterns_to_drop)
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_optimized_hyperstack_{cam_B}.tif")
concat_U16_tiffs(source_files=optimized_files_b, dst=dst, drop_patterns=patterns_to_drop)

dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_volume_used_hyperstack.tif")
concat_U16_tiffs(source_files=vol_used_files, dst=dst, drop_patterns=patterns_to_drop)


# make consensus_map (aka wavefronts over time)
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_consensus_map.tif")
consensus_clusters = folder.glob('*_combined_tiles_predictions_consensus_clusters.tif')
consensus_clusters_wavefronts = folder.glob('*_combined_tiles_predictions_consensus_clusters_wavefronts.tif')
consensus_clusters_psfs = folder.glob('*_combined_tiles_predictions_consensus_clusters_psfs.tif')
patterns_to_drop = list(['after_three', 'pythons_great'])
dst = Path(f"{folder}\\_summary\\{folder.parts[-2]}_consensus_map.tif")

# filter files via "patterns_to_drop", then sort by modified time.
consensus_clusters = [x for x in consensus_clusters if all(y not in str(x) for y in patterns_to_drop)]
consensus_clusters.sort(key=lambda x: os.path.getmtime(x))

@@ -112,25 +122,25 @@ def concat_U16_tiffs(source_files=list([]), dst: Path = None, ch_two=None, drop_
consensus_clusters_psfs = [x for x in consensus_clusters_psfs if all(y not in str(x) for y in patterns_to_drop)]
consensus_clusters_psfs.sort(key=lambda x: os.path.getmtime(x))

t_size = len(consensus_clusters)
t_size = len(consensus_clusters) # number of time points
sample = TiffFile(consensus_clusters[0])
z_size = len(sample.pages) # number of pages in the file
page = sample.pages[0] # get shape and dtype of image in first page
z_size = len(sample.pages) # number of pages in the file
page = sample.pages[0] # get shape
(y_size, x_size, c_size) = page.shape # c_size = 3 for color image

# vertically combine these two stacks.
# vertically combine "consensus_clusters" and "psfs".
hyperstack = np.zeros(shape=[t_size, z_size, y_size * 2, x_size, c_size], dtype=np.ubyte)
hyperstack = np.squeeze(hyperstack)

for i in range(len(consensus_clusters)):
with TiffFile(consensus_clusters[i]) as tif:
print(
f"Concatenating {i + 1} out of {t_size} ({len(sample.pages)} x {sample.pages[0].shape[0]} x {sample.pages[0].shape[1]}) {tif.filename}")
hyperstack[i, :, :y_size] = tif.asarray()
hyperstack[i, :, :y_size] = tif.asarray() # place into top of image

with TiffFile(consensus_clusters_psfs[i]) as tif:
# this stack has only one slice per z slab, so repeat each slice to fill the full z range.
hyperstack[i, :, y_size:] = np.repeat(tif.asarray(), z_size//len(tif.pages), axis=0)
hyperstack[i, :, y_size:] = np.repeat(tif.asarray(), z_size//len(tif.pages), axis=0) # place into bottom of image.
print(f"Concatenating {i+1} out of {t_size} ({len(sample.pages)} x {sample.pages[0].shape[0]} x {sample.pages[0].shape[1]}) {tif.filename}")

dst.parent.mkdir(parents=True, exist_ok=True)
@@ -146,4 +156,4 @@ def concat_U16_tiffs(source_files=list([]), dst: Path = None, ch_two=None, drop_
},
)

print(f"\nSaved:\n{dst.resolve()}")
print(f"\nSaved:\n{dst.resolve()}")