diff --git a/padertorch/contrib/tcl/speaker_embeddings/dvectors.py b/padertorch/contrib/tcl/speaker_embeddings/dvectors.py
index 55ffef62..233b3c14 100644
--- a/padertorch/contrib/tcl/speaker_embeddings/dvectors.py
+++ b/padertorch/contrib/tcl/speaker_embeddings/dvectors.py
@@ -1,7 +1,3 @@
-"""
-Resnet taken from torchvision.models
-See https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py
-"""
 import warnings
 
 import numpy as np
diff --git a/padertorch/contrib/tcl/utils/augmentation.py b/padertorch/contrib/tcl/utils/augmentation.py
index 692b9b29..b4e952e4 100644
--- a/padertorch/contrib/tcl/utils/augmentation.py
+++ b/padertorch/contrib/tcl/utils/augmentation.py
@@ -11,7 +11,6 @@ class AugmentationHelper:
 
     def __init__(self,
                  augmentation_sets: Dict = None,
-                 reverb_set: Union[lazy_dataset.Dataset, List] = None,
                  p_augment: float = 0.,
                  p_reverb=None,
                  augmentation_type: Union[str, Iterable] = ('noise', 'music', 'speech'),
@@ -20,10 +19,10 @@ def __init__(self,
                  target_key='speech_image'
                  ):
         self.augmentation_dataset = augmentation_sets
-        for k, v in self.augmentation_dataset:
+        for k, v in self.augmentation_dataset.items():
             if isinstance(v, list):
                 self.augmentation_dataset[k] = lazy_dataset.concatenate(*v)
-            assert self.augmentation_dataset[k] is lazy_dataset.Dataset, \
+            assert isinstance(self.augmentation_dataset[k], lazy_dataset.Dataset), \
                 f'expected dataset of type lazy_dataset.Dataset, got {repr(v)} for dataset {k}'
         self.p_augment = p_augment
         if p_reverb is None:
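
Note on the augmentation.py change: the sketch below illustrates the corrected normalization logic in isolation. Iterating the dict requires `.items()` to unpack key/value pairs, and the type check must use `isinstance` (the previous `is` comparison tested identity against the class object and could never hold). The toy `augmentation_sets` dict and the use of `lazy_dataset.new` to build small in-memory datasets are illustrative assumptions, not part of the patch.

    # Minimal sketch, assuming lazy_dataset.new() for toy in-memory datasets.
    import lazy_dataset

    augmentation_sets = {
        'noise': [lazy_dataset.new({'a': 1}), lazy_dataset.new({'b': 2})],  # list of datasets
        'music': lazy_dataset.new({'c': 3}),                                # already a dataset
    }

    for k, v in augmentation_sets.items():  # .items() yields (key, value) pairs
        if isinstance(v, list):
            # Merge a list of datasets into a single dataset before validation.
            augmentation_sets[k] = lazy_dataset.concatenate(*v)
        # isinstance() checks the object's type; `x is lazy_dataset.Dataset`
        # would compare the object to the class itself and always fail.
        assert isinstance(augmentation_sets[k], lazy_dataset.Dataset), \
            f'expected dataset of type lazy_dataset.Dataset, got {repr(v)} for dataset {k}'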