fix(map): Add commands for env conditions and data collections
mikkelkp authored and chriswmackey committed Aug 28, 2024
1 parent 2fa35db commit 35b5eb7
Showing 4 changed files with 275 additions and 15 deletions.
2 changes: 2 additions & 0 deletions ladybug_comfort/cli/__init__.py
@@ -6,6 +6,7 @@
from .sql import sql
from .map import map
from .mtx import mtx
from .datacollection import datacollection


# command group for all comfort extension commands.
@@ -20,6 +21,7 @@ def comfort():
comfort.add_command(sql)
comfort.add_command(map)
comfort.add_command(mtx)
comfort.add_command(datacollection, name='data-collection')


# add comfort sub-group to ladybug CLI
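With the commit applied, the group should be reachable from the consolidated CLI under the name `data-collection`. A minimal sketch of checking the registration with click's test runner (not part of the commit itself; assumes ladybug-comfort is installed):

from click.testing import CliRunner

from ladybug_comfort.cli import comfort

# the help text of the comfort group should now list the
# datacollection group under its renamed alias
runner = CliRunner()
result = runner.invoke(comfort, ['--help'])
assert 'data-collection' in result.output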
74 changes: 74 additions & 0 deletions ladybug_comfort/cli/datacollection.py
@@ -0,0 +1,74 @@
"""Commands to work with data collections."""
import sys
import logging
import numpy as np
from pathlib import Path
import click
import json

from ladybug.datacollection import HourlyContinuousCollection, HourlyDiscontinuousCollection
from ladybug.header import Header
from ladybug.datautil import collections_to_csv


_logger = logging.getLogger(__name__)


@click.group(help='Commands to work with data collections.')
def datacollection():
pass


@datacollection.command('folder-to-datacollections')
@click.argument(
    'folder', type=click.Path(exists=True, dir_okay=True, resolve_path=True)
)
@click.argument(
    'data-type', type=click.Path(exists=True, dir_okay=False, resolve_path=True)
)
@click.option(
    '--sub-folder', '-sf', type=click.STRING, default='datacollections',
    show_default=True
)
def folder_to_datacollections(folder, data_type, sub_folder):
    """Read the npy files in a folder and convert every row to a data collection.

    The data collections will be saved as CSV files in a subfolder.

    \b
    Args:
        folder: Folder with one npy file per sensor grid along with a
            grids_info.json describing the grids.
        data-type: Path to a JSON file with a serialized data collection Header.
    """
    with open(Path(folder, 'grids_info.json')) as json_file:
        grid_list = json.load(json_file)
    with open(data_type) as json_file:
        data_header = Header.from_dict(json.load(json_file))
    a_per = data_header.analysis_period
    continuous = a_per.st_hour == 0 and a_per.end_hour == 23
    if not continuous:
        dates = a_per.datetimes
    try:
        for grid in grid_list:
            grid_name = grid['full_id'] if 'full_id' in grid else grid['id']
            metadata = {'grid': grid_name}
            grid_file = Path(folder, '{}.npy'.format(grid_name))
            data_matrix = np.load(grid_file).tolist()
            grid_data = []
            for i, row in enumerate(data_matrix):
                header = data_header.duplicate()
                header.metadata = metadata.copy()
                header.metadata['sensor_index'] = i
                data = HourlyContinuousCollection(header, row) if continuous \
                    else HourlyDiscontinuousCollection(header, row, dates)
                grid_data.append(data)

            file_name = grid_name + '.csv'
            collections_to_csv(grid_data, Path(folder, sub_folder), file_name)
    except Exception:
        _logger.exception('Failed to convert folder of files to data collections.')
        sys.exit(1)
    else:
        sys.exit(0)
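A usage sketch for the new command, driven through click's test runner. The paths here are hypothetical: results must hold a grids_info.json plus one npy file per grid, and data_type.json must contain a serialized ladybug Header dictionary.

from click.testing import CliRunner

from ladybug_comfort.cli.datacollection import folder_to_datacollections

# one CSV per grid is written to <folder>/<sub-folder>
runner = CliRunner()
result = runner.invoke(
    folder_to_datacollections,
    ['results', 'data_type.json', '--sub-folder', 'datacollections'])
assert result.exit_code == 0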
107 changes: 107 additions & 0 deletions ladybug_comfort/cli/map.py
Original file line number Diff line number Diff line change
@@ -22,12 +22,17 @@
    ThermalCondition, ThermalConditionElevenPoint
from ladybug.datatype.temperaturedelta import OperativeTemperatureDelta
from ladybug.datatype.fraction import Fraction
from ladybug.datatype.temperature import AirTemperature, \
    MeanRadiantTemperature, RadiantTemperature
from ladybug.datatype.temperaturedelta import RadiantTemperatureDelta
from ladybug.datatype.fraction import RelativeHumidity

from ladybug_comfort.map.irr import irradiance_contrib_map
from ladybug_comfort.map.mrt import shortwave_mrt_map, longwave_mrt_map
from ladybug_comfort.map.air import air_map
from ladybug_comfort.map.tcp import tcp_model_schedules, tcp_total
from ladybug_comfort.map._enclosure import _parse_enclosure_info, _values_to_data
from ladybug_comfort.map._helper import restore_original_distribution
from ladybug_comfort.collection.pmv import PMV, _PMVnoSET
from ladybug_comfort.collection.adaptive import Adaptive, PrevailingTemperature
from ladybug_comfort.collection.utci import UTCI
@@ -36,6 +41,7 @@
    load_pmv_par_str, load_adaptive_par_str, load_utci_par_str, \
    load_solarcal_par_str, thermal_map_csv, _data_to_ill, set_smallest_dtype


_logger = logging.getLogger(__name__)


@@ -920,6 +926,107 @@ def tcp(condition_csv, enclosure_info, schedule, occ_schedule_json, folder, log_
        sys.exit(0)


@map.command('restructure-env-conditions')
@click.argument(
    'folder', type=click.Path(exists=True, dir_okay=True, resolve_path=True)
)
@click.argument(
    'dest-folder', type=click.Path(exists=False, dir_okay=True, resolve_path=True)
)
@click.argument(
    'sub-path', type=click.STRING
)
def restructure_env_conditions(folder, dest_folder, sub_path):
    """Restructure files of environmental conditions to align with sensor grids.

    \b
    Args:
        folder: Folder with environmental conditions (initial results).
        dest_folder: Destination folder for writing the results.
        sub_path: Sub path for the metric (mrt, air_temperature, longwave_mrt,
            shortwave_mrt, rel_humidity).
    """
    try:
        source_folder = os.path.join(folder, sub_path)
        dest_folder = os.path.join(folder, 'final', sub_path)
        if sub_path == 'mrt':
            source_folders = [os.path.join(folder, 'longwave_mrt'),
                              os.path.join(folder, 'shortwave_mrt')]
            dest_folders = [os.path.join(folder, 'final', 'longwave_mrt'),
                            os.path.join(folder, 'final', 'shortwave_mrt')]
        else:
            assert os.path.isdir(source_folder), \
                'Metric "{}" does not exist for this comfort study.'.format(sub_path)
            source_folders, dest_folders = [source_folder], [dest_folder]

        # restructure the results to align with the sensor grids
        dist_info = os.path.join(folder, '_redist_info.json')
        for src_f, dst_f in zip(source_folders, dest_folders):
            if not os.path.isdir(dst_f):
                os.makedirs(dst_f)
            restore_original_distribution(
                src_f, dst_f, extension='csv', dist_info=dist_info,
                output_extension='csv', as_text=True, fmt='%.12f',
                delimiter='comma')
            grid_info_src = os.path.join(folder, 'grids_info.json')
            grid_info_dst = os.path.join(dst_f, 'grids_info.json')
            shutil.copyfile(grid_info_src, grid_info_dst)
            data_header = create_result_header(folder, os.path.split(dst_f)[-1])
            result_info_path = os.path.join(dst_f, 'results_info.json')
            with open(result_info_path, 'w') as fp:
                json.dump(data_header.to_dict(), fp, indent=4)
        # if MRT was requested, sum together the longwave and shortwave
        if sub_path == 'mrt':
            sum_matrices(dest_folders[0], dest_folders[1], dest_folder)
            data_header = create_result_header(folder, sub_path)
            result_info_path = os.path.join(dest_folder, 'results_info.json')
            with open(result_info_path, 'w') as fp:
                json.dump(data_header.to_dict(), fp, indent=4)
    except Exception:
        _logger.exception('Failed to restructure environmental conditions.')
        sys.exit(1)
    else:
        sys.exit(0)
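An equivalent sketch for the restructuring command (paths hypothetical). Note that, as written, the output always lands in <folder>/final/<sub_path>, since dest_folder is reassigned at the top of the try block.

from click.testing import CliRunner

from ladybug_comfort.cli.map import restructure_env_conditions

# align the raw air temperature results with the original sensor grids
runner = CliRunner()
result = runner.invoke(
    restructure_env_conditions,
    ['initial_results', 'final_results', 'air_temperature'])
assert result.exit_code == 0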


def create_result_header(env_conds, sub_path):
    """Create a DataCollection Header for a given metric."""
    with open(os.path.join(env_conds, 'results_info.json')) as json_file:
        base_head = Header.from_dict(json.load(json_file))
    if sub_path == 'mrt':
        return Header(MeanRadiantTemperature(), 'C', base_head.analysis_period)
    elif sub_path == 'air_temperature':
        return Header(AirTemperature(), 'C', base_head.analysis_period)
    elif sub_path == 'longwave_mrt':
        return Header(RadiantTemperature(), 'C', base_head.analysis_period)
    elif sub_path == 'shortwave_mrt':
        return Header(RadiantTemperatureDelta(), 'dC', base_head.analysis_period)
    elif sub_path == 'rel_humidity':
        return Header(RelativeHumidity(), '%', base_head.analysis_period)


def sum_matrices(mtxs_1, mtxs_2, dest_dir):
    """Sum together matrices of two folders."""
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    for mtx_file in os.listdir(mtxs_1):
        if mtx_file.endswith('.csv'):
            mtx_file1 = os.path.join(mtxs_1, mtx_file)
            mtx_file2 = os.path.join(mtxs_2, mtx_file)
            matrix_1 = np.loadtxt(mtx_file1, dtype=np.float32, delimiter=',')
            matrix_2 = np.loadtxt(mtx_file2, dtype=np.float32, delimiter=',')
            data = matrix_1 + matrix_2
            csv_path = os.path.join(dest_dir, mtx_file)
            np.savetxt(csv_path, data, fmt='%.12f', delimiter=',')
        elif mtx_file == 'grids_info.json':
            shutil.copyfile(
                os.path.join(mtxs_1, mtx_file),
                os.path.join(dest_dir, mtx_file)
            )
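The MRT case is a plain elementwise sum: longwave MRT in C plus the shortwave delta in dC. A toy illustration with made-up values, mirroring the sensor-by-timestep layout that sum_matrices expects:

import numpy as np

longwave = np.array([[20.0, 21.0], [22.0, 23.0]], dtype=np.float32)  # C
shortwave = np.array([[3.5, 0.0], [1.2, 4.8]], dtype=np.float32)  # dC
mrt = longwave + shortwave  # full mean radiant temperature per sensor/step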


def _tcp_config():
    """Return vtk-config for a thermal comfort map."""
    return {
107 changes: 92 additions & 15 deletions ladybug_comfort/map/_helper.py
@@ -1,4 +1,6 @@
"""A collection of helper functions for the map sub-package."""
import json
from pathlib import Path
import numpy as np


@@ -71,21 +73,6 @@ def binary_to_array(
    Returns:
        A NumPy array.
    """
    with open(binary_file, 'rb') as file:
        # check if file is NumPy file
        numpy_header = file.read(6)
        if numpy_header.startswith(b'\x93NUMPY'):
            file.seek(0)
            array = np.load(file)
            return array
        file.seek(0)
        # check if file has Radiance header, if not it is a text file
        radiance_header = file.read(10).decode('utf-8')
        if radiance_header != '#?RADIANCE':
            file.seek(0)
            array = np.genfromtxt(file, dtype=np.float32)
            return array

    if (nrows or ncols or ncomp or fmt) is None:
        # get nrows, ncols and header line count
        nrows, ncols, ncomp, line_count, fmt = binary_mtx_dimension(binary_file)
@@ -120,3 +107,93 @@ def load_matrix(matrix_file, delimiter=','):
        array = np.load(matrix_file)

    return array


def restore_original_distribution(
        input_folder, output_folder, extension='npy', dist_info=None,
        output_extension='ill', as_text=False, fmt='%.2f', input_delimiter=',',
        delimiter='tab'):
    """Restructure files to the original distribution based on the distribution info.

    It will assume that the files in the input folder are NumPy files. However,
    if it fails to load the files as arrays, it will try to load them from
    binary Radiance files instead.

    Args:
        input_folder: Path to input folder.
        output_folder: Path to the new restructured folder.
        extension: Extension of the files to collect data from. Default is ``npy``
            for NumPy files. Another common extension is ``ill`` for the results
            of daylight studies.
        dist_info: Path to dist_info.json file. If None, the function will try to
            load the ``_redist_info.json`` file from inside the input_folder.
            (Default: None).
        output_extension: Output file extension. This is only used if as_text
            is set to True. Otherwise the output extension will be ``npy``.
        as_text: Set to True if the output files should be saved as text instead
            of NumPy files.
        fmt: Format for the output files when saved as text.
        input_delimiter: Delimiter for the input files. This is used only if the
            input files are text files.
        delimiter: Delimiter for the output files when saved as text.
    """
    if not dist_info:
        _redist_info_file = Path(input_folder, '_redist_info.json')
    else:
        _redist_info_file = Path(dist_info)

    assert _redist_info_file.is_file(), 'Failed to find %s' % _redist_info_file

    with open(_redist_info_file) as inf:
        data = json.load(inf)

    # create output folder
    output_folder = Path(output_folder)
    if not output_folder.is_dir():
        output_folder.mkdir(parents=True, exist_ok=True)

    src_file = Path()  # sentinel; replaced on the first iteration
    for f in data:
        output_file = Path(output_folder, f['identifier'])
        # ensure the new folder is created in case the identifier has a subfolder
        parent_folder = output_file.parent
        if not parent_folder.is_dir():
            parent_folder.mkdir()

        out_arrays = []
        for src_info in f['dist_info']:
            st = src_info['st_ln']
            end = src_info['end_ln']
            new_file = Path(input_folder, '%s.%s' % (src_info['identifier'], extension))
            if not new_file.samefile(src_file):
                src_file = new_file
                try:  # first, try to load the file as a NumPy array
                    array = np.load(src_file)
                except Exception:
                    try:  # next, try to load it as a binary Radiance matrix
                        array = binary_to_array(src_file)
                    except Exception:
                        try:  # fall back on loading it as a text file
                            array = np.loadtxt(
                                src_file, delimiter=input_delimiter)
                        except Exception:
                            raise RuntimeError(
                                f'Failed to load input file "{src_file}"')
            slice_array = array[st:end + 1, :]

            out_arrays.append(slice_array)

        out_array = np.concatenate(out_arrays)
        # save numpy array; the .npy extension is added automatically
        if not as_text:
            np.save(output_file, out_array)
        else:
            if output_extension.startswith('.'):
                output_extension = output_extension[1:]
            if delimiter == 'tab':
                delimiter = '\t'
            elif delimiter == 'space':
                delimiter = ' '
            elif delimiter == 'comma':
                delimiter = ','
            np.savetxt(output_file.with_suffix(f'.{output_extension}'),
                       out_array, fmt=fmt, delimiter=delimiter)
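The expected shape of the distribution info can be read off the loop above: a list of output files, each assembled from inclusive row ranges (st_ln to end_ln) of the chunked input files. A hypothetical two-chunk example, written out from Python:

import json

# hypothetical _redist_info.json: 'grid_1' is rebuilt from rows 0-99 of
# chunk_0.npy followed by rows 0-49 of chunk_1.npy
redist_info = [
    {
        'identifier': 'grid_1',
        'dist_info': [
            {'identifier': 'chunk_0', 'st_ln': 0, 'end_ln': 99},
            {'identifier': 'chunk_1', 'st_ln': 0, 'end_ln': 49},
        ]
    }
]
with open('_redist_info.json', 'w') as fp:
    json.dump(redist_info, fp, indent=2)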
