Then I copied find_principal_directions.py into the Colab notebook like this:
from dataloader import *
import numpy as np
import tensorflow as tf
import torch           # used below but missing from the original imports
import torch.nn as nn  # needed for nn.DataParallel
import principal_directions.classifier

# Model, Checkpointer, run, get_cfg_defaults and count_parameters are expected to be
# provided by the repository's own modules (e.g. pulled in via the wildcard import above).


def parse_tfrecord_np(record):
    ex = tf.train.Example()
    ex.ParseFromString(record)
    shape = ex.features.feature['shape'].int64_list.value
    data = ex.features.feature['data'].bytes_list.value[0]
    dlat = ex.features.feature['dlat'].bytes_list.value[0]
    lat = ex.features.feature['lat'].bytes_list.value[0]
    return np.fromstring(data, np.uint8).reshape(shape), np.fromstring(dlat, np.float32), np.fromstring(lat, np.float32)


class Predictions:
    def __init__(self, cfg, minibatch_gpu):
        self.minibatch_size = minibatch_gpu
        self.cfg = cfg

    def evaluate(self, logger, mapping, decoder, lod, attrib_idx):
        result_expr = []
        rnd = np.random.RandomState(5)
        with tf.Graph().as_default(), tf.Session() as sess:
            ds = tf.data.TFRecordDataset("principal_directions/generated_data.000")
            ds = ds.batch(self.minibatch_size)
            batch = ds.make_one_shot_iterator().get_next()
            classifier = principal_directions.classifier.make_classifier(attrib_idx)
            i = 0
            while True:
                try:
                    records = sess.run(batch)
                    images = []
                    dlats = []
                    lats = []
                    for r in records:
                        im, dlat, lat = parse_tfrecord_np(r)
                        # plt.imshow(im.transpose(1, 2, 0), interpolation='nearest')
                        # plt.show()
                        images.append(im)
                        dlats.append(dlat)
                        lats.append(lat)
                    images = np.stack(images)
                    dlats = np.stack(dlats)
                    lats = np.stack(lats)
                    logits = classifier.run(images, None, num_gpus=1, assume_frozen=True)
                    logits = torch.tensor(logits)
                    predictions = torch.softmax(torch.cat([logits, -logits], dim=1), dim=1)
                    result_dict = dict(latents=lats, dlatents=dlats)
                    result_dict[attrib_idx] = predictions.cpu().numpy()
                    result_expr.append(result_dict)
                    i += 1
                except tf.errors.OutOfRangeError:
                    break
        results = {key: np.concatenate([value[key] for value in result_expr], axis=0) for key in result_expr[0].keys()}
        np.save("principal_directions/wspace_att_%d" % attrib_idx, results)


def main(cfg, logger):
    torch.cuda.set_device(0)
    model = Model(
        startf=cfg.MODEL.START_CHANNEL_COUNT,
        layer_count=cfg.MODEL.LAYER_COUNT,
        maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
        latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
        truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
        truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
        mapping_layers=cfg.MODEL.MAPPING_LAYERS,
        channels=cfg.MODEL.CHANNELS,
        generator=cfg.MODEL.GENERATOR,
        encoder=cfg.MODEL.ENCODER)
    model.cuda(0)
    model.eval()
    model.requires_grad_(False)

    decoder = model.decoder
    encoder = model.encoder
    mapping_tl = model.mapping_tl
    mapping_fl = model.mapping_fl
    dlatent_avg = model.dlatent_avg

    logger.info("Trainable parameters generator:")
    count_parameters(decoder)

    logger.info("Trainable parameters discriminator:")
    count_parameters(encoder)

    arguments = dict()
    arguments["iteration"] = 0

    model_dict = {
        'discriminator_s': encoder,
        'generator_s': decoder,
        'mapping_tl_s': mapping_tl,
        'mapping_fl_s': mapping_fl,
        'dlatent_avg': dlatent_avg
    }

    checkpointer = Checkpointer(cfg,
                                model_dict,
                                {},
                                logger=logger,
                                save=False)
    checkpointer.load()

    model.eval()

    layer_count = cfg.MODEL.LAYER_COUNT

    logger.info("Extracting attributes")

    decoder = nn.DataParallel(decoder)

    indices = [0, 1, 2, 3, 4, 10, 11, 17, 19]
    with torch.no_grad():
        p = Predictions(cfg, minibatch_gpu=4)
        for i in indices:
            p.evaluate(logger, mapping_fl, decoder, cfg.DATASET.MAX_RESOLUTION_LEVEL - 2, i)


if __name__ == "__main__":
    gpu_count = 1
    run(main, get_cfg_defaults(), description='StyleGAN', default_config='configs/celeba.yaml',
        world_size=gpu_count, write_log=False)
Here are the logs from running each of the code blocks, respectively:
Collecting tensorflow-gpu==1.10
Downloading https://files.pythonhosted.org/packages/64/ca/830b7cedb073ae264d215d51bd18d7cff7a2a47e39d79f6fa23edae17bb2/tensorflow_gpu-1.10.0-cp36-cp36m-manylinux1_x86_64.whl (253.2MB)
|████████████████████████████████| 253.3MB 52kB/s
Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (0.8.1)
Collecting numpy<=1.14.5,>=1.13.3
Downloading https://files.pythonhosted.org/packages/68/1e/116ad560de97694e2d0c1843a7a0075cc9f49e922454d32f49a80eb6f1f2/numpy-1.14.5-cp36-cp36m-manylinux1_x86_64.whl (12.2MB)
|████████████████████████████████| 12.2MB 38.8MB/s
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (1.1.0)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (0.34.2)
Requirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (0.3.3)
Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (3.12.4)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (1.31.0)
Collecting tensorboard<1.11.0,>=1.10.0
Downloading https://files.pythonhosted.org/packages/c6/17/ecd918a004f297955c30b4fffbea100b1606c225dbf0443264012773c3ff/tensorboard-1.10.0-py3-none-any.whl (3.3MB)
|████████████████████████████████| 3.3MB 44.2MB/s
Collecting setuptools<=39.1.0
Downloading https://files.pythonhosted.org/packages/8c/10/79282747f9169f21c053c562a0baa21815a8c7879be97abd930dbcf862e8/setuptools-39.1.0-py2.py3-none-any.whl (566kB)
|████████████████████████████████| 573kB 42.4MB/s
Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (1.15.0)
Requirement already satisfied: absl-py>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.10) (0.9.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.11.0,>=1.10.0->tensorflow-gpu==1.10) (3.2.2)
Requirement already satisfied: werkzeug>=0.11.10 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.11.0,>=1.10.0->tensorflow-gpu==1.10) (1.0.1)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard<1.11.0,>=1.10.0->tensorflow-gpu==1.10) (1.7.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard<1.11.0,>=1.10.0->tensorflow-gpu==1.10) (3.1.0)
ERROR: xarray 0.15.1 has requirement numpy>=1.15, but you'll have numpy 1.14.5 which is incompatible.
ERROR: xarray 0.15.1 has requirement setuptools>=41.2, but you'll have setuptools 39.1.0 which is incompatible.
ERROR: umap-learn 0.4.6 has requirement numpy>=1.17, but you'll have numpy 1.14.5 which is incompatible.
ERROR: tifffile 2020.7.24 has requirement numpy>=1.15.1, but you'll have numpy 1.14.5 which is incompatible.
ERROR: tensorflow 2.3.0 has requirement numpy<1.19.0,>=1.16.0, but you'll have numpy 1.14.5 which is incompatible.
ERROR: tensorflow 2.3.0 has requirement tensorboard<3,>=2.3.0, but you'll have tensorboard 1.10.0 which is incompatible.
ERROR: spacy 2.2.4 has requirement numpy>=1.15.0, but you'll have numpy 1.14.5 which is incompatible.
ERROR: plotnine 0.6.0 has requirement numpy>=1.16.0, but you'll have numpy 1.14.5 which is incompatible.
ERROR: numba 0.48.0 has requirement numpy>=1.15, but you'll have numpy 1.14.5 which is incompatible.
ERROR: imgaug 0.2.9 has requirement numpy>=1.15.0, but you'll have numpy 1.14.5 which is incompatible.
ERROR: google-auth 1.17.2 has requirement setuptools>=40.3.0, but you'll have setuptools 39.1.0 which is incompatible.
ERROR: fastai 1.0.61 has requirement numpy>=1.15, but you'll have numpy 1.14.5 which is incompatible.
ERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.
ERROR: cvxpy 1.0.31 has requirement numpy>=1.15, but you'll have numpy 1.14.5 which is incompatible.
ERROR: blis 0.4.1 has requirement numpy>=1.15.0, but you'll have numpy 1.14.5 which is incompatible.
ERROR: astropy 4.0.1.post1 has requirement numpy>=1.16, but you'll have numpy 1.14.5 which is incompatible.
ERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.
Installing collected packages: numpy, tensorboard, setuptools, tensorflow-gpu
Found existing installation: numpy 1.18.5
Uninstalling numpy-1.18.5:
Successfully uninstalled numpy-1.18.5
Found existing installation: tensorboard 2.3.0
Uninstalling tensorboard-2.3.0:
Successfully uninstalled tensorboard-2.3.0
Found existing installation: setuptools 49.2.0
Uninstalling setuptools-49.2.0:
Successfully uninstalled setuptools-49.2.0
Successfully installed numpy-1.14.5 setuptools-39.1.0 tensorboard-1.10.0 tensorflow-gpu-1.10.0
WARNING: The following packages were previously imported in this runtime:
[numpy,pkg_resources]
You must restart the runtime in order to use newly installed versions.
[autoreload of pkg_resources._vendor.six failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
AttributeError: 'NoneType' object has no attribute 'cStringIO'
]
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:882: UserWarning: add_newdoc was used on a pure-python object <function empty_like at 0x7fd680b3e7b8>. Prefer to attach it directly to the source.
""")
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:1239: UserWarning: add_newdoc was used on a pure-python object <function concatenate at 0x7fd680b3e8c8>. Prefer to attach it directly to the source.
""")
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:1313: UserWarning: add_newdoc was used on a pure-python object <function inner at 0x7fd680b3e9d8>. Prefer to attach it directly to the source.
""")
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:1519: UserWarning: add_newdoc was used on a pure-python object <function where at 0x7fd680b3eae8>. Prefer to attach it directly to the source.
""")
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:1596: UserWarning: add_newdoc was used on a pure-python object <function lexsort at 0x7fd680b3ebf8>. Prefer to attach it directly to the source.
""")
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:1704: UserWarning: add_newdoc was used on a pure-python object <function can_cast at 0x7fd680b3ed08>. Prefer to attach it directly to the source.
""")
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:1804: UserWarning: add_newdoc was used on a pure-python object <function min_scalar_type at 0x7fd680b3ee18>. Prefer to attach it directly to the source.
""")
/usr/local/lib/python3.6/dist-packages/numpy/add_newdocs.py:1873: UserWarning: add_newdoc was used on a pure-python object <function result_type at 0x7fd680b3ef28>. Prefer to attach it directly to the source.
""")
[autoreload of numpy failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
AttributeError: module 'numpy.core.multiarray' has no attribute 'newbuffer'
]
[autoreload of numpy.core failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name '_numpy_tester'
]
[autoreload of numpy.core.numerictypes failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
IndexError: string index out of range
]
[autoreload of numpy.core.numeric failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name 'TooHardError'
]
[autoreload of numpy.lib failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
NameError: name 'type_check' is not defined
]
[autoreload of numpy.matrixlib failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
NameError: name 'defmatrix' is not defined
]
[autoreload of numpy.linalg failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name '_numpy_tester'
]
[autoreload of numpy.lib.function_base failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name 'digitize'
]
[autoreload of numpy.fft failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name '_FFTCache'
]
[autoreload of numpy.polynomial failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name '_numpy_tester'
]
[autoreload of numpy.random failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name '_numpy_tester'
]
[autoreload of numpy.ma failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
ImportError: cannot import name '_numpy_tester'
]
[autoreload of numpy.ma.core failed: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/IPython/extensions/autoreload.py", line 247, in check
superreload(m, reload, self.old_objects)
AttributeError: module 'numpy' has no attribute 'rank'
]
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/pywrap_tensorflow.py in <module>()
57
---> 58 from tensorflow.python.pywrap_tensorflow_internal import *
59 from tensorflow.python.pywrap_tensorflow_internal import __version__
7 frames
ImportError: libcublas.so.9.0: cannot open shared object file: No such file or directory
During handling of the above exception, another exception occurred:
ImportError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/pywrap_tensorflow.py in <module>()
72 for some common reasons and solutions. Include the entire stack trace
73 above this error message when asking for help.""" % traceback.format_exc()
---> 74 raise ImportError(msg)
75
76 # pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
ImportError: Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 58, in <module>
from tensorflow.python.pywrap_tensorflow_internal import *
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 28, in <module>
_pywrap_tensorflow_internal = swig_import_helper()
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
File "/usr/lib/python3.6/imp.py", line 243, in load_module
return load_dynamic(name, filename, file)
File "/usr/lib/python3.6/imp.py", line 343, in load_dynamic
return _load(spec)
ImportError: libcublas.so.9.0: cannot open shared object file: No such file or directory
Failed to load the native TensorFlow runtime.
See https://www.tensorflow.org/install/install_sources#common_installation_problems
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------
ImportError: libcublas.so.9.0: cannot open shared object file: No such file or directory
It depends on TensorFlow 1.10, which in turn depends on CUDA and cuDNN runtimes that are quite old by now.
I do not know why there is a conflict of numpy versions, though.
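A minimal sketch for confirming both problems from inside the notebook (assuming a standard Colab runtime; ctypes is used only to probe for the library that TensorFlow 1.10 tries to load):

import ctypes
import numpy as np

# tensorflow-gpu 1.10 was built against CUDA 9.0, so importing it tries to load libcublas.so.9.0.
try:
    ctypes.CDLL("libcublas.so.9.0")
    print("libcublas.so.9.0 found")
except OSError as err:
    print("libcublas.so.9.0 is missing:", err)

# pip downgraded numpy to 1.14.5 to satisfy tensorflow-gpu==1.10 (numpy<=1.14.5,>=1.13.3);
# the already-imported newer numpy stays active until the runtime is restarted, which is
# what the autoreload tracebacks in the log are complaining about.
print("numpy version seen by this runtime:", np.__version__)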
I've used the command
%pip install tensorflow-gpu==1.10
following your ReadMe. The command is run after