-
Notifications
You must be signed in to change notification settings - Fork 1
/
wcps_rasdaman.py
134 lines (111 loc) · 5.05 KB
/
wcps_rasdaman.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
def wcps_rasdaman(query, ip='saocompute.eurac.edu', file_name='', rtype='ProcessCoverages'):
    """
    Sends a WCPS query to a Rasdaman server and wraps the response for further use in Python
    depending on the response format chosen in the query.

    Args:
        query (str) -- WCPS query to send to the Rasdaman server, or a coverage id when
                       rtype is 'DescribeCoverage'
        ip (str) -- hostname/IP of the Rasdaman server (default saocompute.eurac.edu)
        file_name (str) -- optional path for responses written to disk (netCDF or image);
                           a uuid-based name is generated when empty
        rtype (str) -- 'ProcessCoverages' to execute the query, or 'DescribeCoverage' to
                       fetch coverage metadata

    Returns:
        Either one of the following
        - Numpy array for JSON/CSV formatted response
        - Ordered dict for an XML formatted response (e.g. DescribeCoverage)
        - Xarray Dataset for a netCDF formatted response
        - Filepath to a TIFF/JPEG/JP2/PNG file saved to disk, in respect to the response image type
        - The response object, when the response could not be processed

    Raises:
        Exception -- re-raises whatever requests.get() raised (after logging it), e.g.
                     requests.ConnectionError when the server is unreachable.

    Sources:
        http://xarray.pydata.org/en/stable/io.html#netcdf
        http://xarray.pydata.org/en/stable/data-structures.html

    Author: Harald Kristen, Alexander Jacob
    Date: 2020-04-15
    """
    import requests
    import werkzeug
    import numpy as np
    import io
    import xarray as xr
    import uuid
    import xml.etree.ElementTree as ET
    import xmltodict

    # Hosts served through the eurac reverse proxy are reached on port 80,
    # any other host on Rasdaman's default port 8080.
    if ip == 'saocompute.eurac.edu/sincohmap' or ip == 'saocompute.eurac.edu':
        url = 'http://' + ip + '/rasdaman/ows?SERVICE=WCS&VERSION=2.0.1&REQUEST=ProcessCoverages'
    else:
        url = 'http://' + ip + ':8080/rasdaman/ows?SERVICE=WCS&VERSION=2.0.1&REQUEST=ProcessCoverages'
    # Fix the special characters used in the input query like ' ', $ and so on
    query = werkzeug.urls.url_fix(query)
    complete_url = url + '&query=' + query
    if rtype == 'DescribeCoverage':
        complete_url = ('http://' + ip
                        + ':8080/rasdaman/ows?SERVICE=WCS&VERSION=2.0.1&REQUEST=DescribeCoverage&COVERAGEID='
                        + query)
    print('This is the URL, used for the request:\n' + complete_url)
    try:
        # Send the request to Rasdaman and save the response in the variable "r"
        r = requests.get(complete_url, stream=True)
    except Exception as ex:
        # BUGFIX: was print(tpye(ex)) -- a NameError. Also re-raise here,
        # otherwise the code below would dereference an undefined "r".
        print(type(ex))
        print(ex.args)
        print(ex)
        raise

    # If there is an error, print the error message that comes from Rasdaman
    if r.status_code != requests.codes.ok:
        print('HTTP Error ' + str(r.status_code))
        root = ET.fromstring(r.text)
        for element in root.iter():
            if element.tag == '{http://www.opengis.net/ows/2.0}ExceptionText':
                print(element.text)

    print('currently receiving content of type: ' + r.headers['Content-Type'])
    if r.headers['Content-Type'] == 'text/plain':
        # Convert CSV or JSON text to a NumPy array.
        # BUGFIX: Response.text is a property, not a method (was r.text()).
        response_text = r.text
        output = response_text
        # The JSON version also works for 2D arrays.
        if response_text.startswith("{"):
            loaded = r.json()
            output = np.array(loaded)
        else:
            # Strip the enclosing brackets and parse the comma-separated values.
            # np.fromstring with sep= is deprecated for text input, so split explicitly.
            output = np.array(response_text[1:-1].split(','), dtype=float)
    elif r.headers['Content-Type'] == 'text/xml':
        # Parse the XML body into an ordered dictionary; reuse the content
        # already downloaded instead of issuing a second request via urllib.
        output = xmltodict.parse(r.content)
    elif r.headers['Content-Type'] == 'application/netcdf':
        print(r.headers)
        # Write the stream to disk and create an xarray dataset from it
        if file_name == '':
            file_name = 'wcps_' + str(uuid.uuid4()) + '.nc'
        print('the following file has been saved locally: ' + file_name)
        with io.open(file_name, 'wb') as outfile:
            outfile.write(r.content)
        output_open = xr.open_dataset(file_name)
        # Xarray is normally lazy loading netCDF files.
        # As we want to perform intense computation, we load the file directly
        # into main memory with Dataset.load()
        output = xr.Dataset.load(output_open)
    elif r.headers['Content-Type'] == 'application/json':
        # Convert JSON to NumpyArray
        loaded = r.json()
        output = np.array(loaded)
    elif r.headers['Content-Type'] in ['image/tiff', 'image/png', 'image/jp2', 'image/jpeg']:
        # Write response in chosen image format to disk and print filepath
        image_type = r.headers['Content-Type']
        if file_name == '':
            # Derive the file extension from the MIME subtype, e.g. 'image/tiff' -> 'tiff'
            file_ending = image_type[6:]
            file_name = 'wcps_' + str(uuid.uuid4()) + '.' + file_ending
        with io.open(file_name, 'wb') as outfile:
            outfile.write(r.content)
        print('the following file has been saved locally: ' + file_name)
        output = file_name
    else:
        # Unknown content type: hand the raw response object back to the caller
        output = r
        output_type = r.headers['Content-Type']
        print('The response could not be processed, as it is a ' + output_type)
    return output