# # Changes # # 2023-10, Arjo Segers # Tools to access Copernicus DataSpace. # # 2023-11, Arjo Segers # Extended error traps. # Store access token in object to avoid server errors. # Retry if downloaded zipfile is corrupted. # # 2024-01, Arjo Segers # Do not use timeout when downloading. # ######################################################################## ### ### help ### ######################################################################## """ .. _cso-dataspace: ************************ ``cso_dataspace`` module ************************ The ``cso_dataspace`` module provides classes for accessing data from the `Copernicus DataSpace `_. To browse through the data, use the `Browser `_. .. _dataspace-account: Account setup ============= To be able to download data from the *DataSpace*, first `Register and create an account `_. On a Linux system, login/passwords for websites can be stored in the users ``.netrc`` file in the home directory. Create this file if it does not exist yet, and add the following line with the login name of the account (your email) and the chosen password:: machine zipper.dataspace.copernicus.eu login Your.Name@institute.org password *********** The file should be readible and writable for you only:: chmod 400 ~/.netrc .. _dataspace-api: DataSpace API's =============== The *DataSpace* could be access with a number of different `APIs `_. Currently the `OpenSearch API `_ is used as that was the first that worked as needed. In future the `STAC API `_ might be used, as this is becoming more and more the standard in the Earth Observation community. Within CSO it was already used by for example :ref:`pal-api`, but could not get working yet for the *DataSpace*. See the `STAC product catalog `_ for more information. Class hierchy ============= The classes and are defined according to the following hierchy: * :py:class:`.UtopyaRc` * :py:class:`.CSO_DataSpace_Inquire` * :py:class:`CSO_DataSpace_DownloadFile` * :py:class:`NullAuth` Classes ======= """ ######################################################################## ### ### modules ### ######################################################################## # modules: import logging import requests # tools: import utopya ######################################################################## ### ### OpenSearch inquire ### ######################################################################## class CSO_DataSpace_Inquire(utopya.UtopyaRc): """ Inquire available Sentinel data from the `Copernicus DataSpace `_. Before data could be downloaded from the *DataSpace*, setup your :ref:`dataspace-account`. Currently the `OpenSearch API `_ is used as that was the first that worked as needed; in future, the `STAC product catalog `_ might be used. A query is sent to search for products that are available for a certain time and overlap with a specified region. The result is a list with orbit files and instructions on how to download them. In the settings, specify the time range over which files should be downloaded:: .timerange.start : 2018-07-01 00:00 .timerange.end : 2018-07-01 23:59 Specify the base url of the API:: .url : https://finder.creodias.eu/resto/api Define the collection name with:: .collection : Sentinel5P Provide a product type:: ! product type (always 10 characters!): .producttype : L2__NO2___ Eventually specify a target area, only orbits with some pixels within the defined box will be downloaded:: ! target area, leave empty for globe; format: west,south,east,north .area : !.area : -30,30,35,76 The table will also create the url's to download a file; specifity the template that should be used: ! template for download url given "{product_id}": .download_url : https://zipper.dataspace.copernicus.eu/odata/v1/Products({product_id})/$value Name of output csv file:: ! output table, here including date of today: .output.file : ${my.work}/PAL_S5P_NO2_%Y-%m-%d.csv Example records (with extra whitespace to show the columns):: orbit;start_time ;end_time ;processing;collection;processor_version;filename ;href 11488;2020-01-01 02:34:16;2020-01-01 04:15:46;RPRO ;03 ;020400 ;S5P_RPRO_L2__CH4____20200101T023416_20200101T041546_11488_03_020400_20221120T003820.nc;https://zipper.dataspace.copernicus.eu/odata/v1/Products(b3f240e6-505d-4cae-97ea-43a8778a318d)/$value 11487;2020-01-01 00:52:46;2020-01-01 02:34:16;RPRO ;03 ;020400 ;S5P_RPRO_L2__CH4____20200101T005246_20200101T023416_11487_03_020400_20221120T003818.nc;https://zipper.dataspace.copernicus.eu/odata/v1/Products(a3d40f81-6c86-44bc-bc4b-457ff069b121)/$value : """ def __init__(self, rcfile, rcbase="", env={}, indent=""): """ Inquire oribt files. """ # modules: import sys import os import datetime import calendar import time import requests import pandas # info ... logging.info(f"{indent}") logging.info(f"{indent}** Inquire files available on Copernicus DataSpace") logging.info(f"{indent}") # init base object: utopya.UtopyaRc.__init__(self, rcfile=rcfile, rcbase=rcbase, env=env) # number of seconds to wait in retry loop: nsec_wait = 10 # url of API: api_url = self.GetSetting("url") # info ... logging.info(f"{indent}API url : {api_url}") # template url for downloads: download_url = self.GetSetting("download_url") # info ... logging.info(f"{indent}download url : {download_url}") # collection: collection = self.GetSetting("collection") # info ... logging.info(f"{indent}collection : {collection}") # combine into search url: search_url = f"{api_url}/collections/{collection}/search.json" # time range: t1 = self.GetSetting("timerange.start", totype="datetime") t2 = self.GetSetting("timerange.end", totype="datetime") # info ... tfmt = "%Y-%m-%d %H:%M" logging.info(f"{indent}timerange : [{t1.strftime(tfmt)},{t2.strftime(tfmt)}") # product type (always 10 characters!): # L2__NO2___ producttype = self.GetSetting("producttype") # info ... logging.info(f"{indent}product type : {producttype}") # area of interest: west,south:east,north area = self.GetSetting("area") # defined? if len(area) > 0: # convert from format for "dhusget.sh": # west,south:east,north west, south, east, north = map(float, area.replace(":", " ").replace(",", " ").split()) # info ... logging.info( f"{indent}area : [{west:.2f},{east:.2f}] x [{south:.2f},{north:.2f}]" ) # box parameter: box = f"{west},{east},{south},{north}" else: # info ... logging.info(f"{indent}area : no") # box parameter: box = None # endif # target file, might include time templates: output_file__template = self.GetSetting("output.file") # current time: output_file = datetime.datetime.now().strftime(output_file__template) # initialize output table: output_df = pandas.DataFrame() # info ... logging.info(f"{indent}search all items in timerange ...") # search query could only return a maximum number of records; # a 'page' of records is requested using a row offset and the number of rows: row0 = 0 nrow = 100 # initialize search parameters; # for possible content, see: # https://documentation.dataspace.copernicus.eu/APIs/OpenSearch.html params = {} # fill maximum time range: tfmt = "%Y-%m-%dT%H:%M:%SZ" params["startDate"] = t1.strftime(tfmt) params["completionDate"] = t2.strftime(tfmt) if box is not None: params["box"] = box # endif # fill product type: params["productType"] = producttype # fill paging info: params["maxRecords"] = nrow # init counter: ipage = 0 # loop over pages of query result: while True: # increase counter: ipage += 1 # info ... logging.info(f"{indent} page {ipage} (entries {row0+1},..,{row0+nrow})") # fill page number: params["page"] = ipage # number of tries: ntry = 1 maxtry = 5 # repeat a few times if necessary: while ntry <= maxtry: try: # send query to search page; no authorization is needed ... r = requests.get(search_url, params=params) # check status, raise error if request failed: r.raise_for_status() except Exception as err: msg = str(err) logging.warning(f"{indent} from query; message received:") logging.warning(f"{indent}%s" % msg) if ntry == maxtry: logging.error(f"{indent} tried {ntry} times now, exit ...") raise Exception else: logging.warning(f"{indent} wait {nsec_wait} seconds ..") time.sleep(nsec_wait) logging.warning(f"{indent} attempt {ntry} / {maxtry} ...") ntry += 1 continue # endif # endtry # no error, leave: break # endwhile # While testing: save the result as a json file, and load it into a browser. # This shows a dict with among others the fields: # # { .. # 'features' : [ # list of orbits, in browser named: '0','1',... # { 'id' : '0f318743-8bb9-55ed-b42d-7721b24f7ede', # download id # 'properties' : { # 'title' : "S5P_OFFL_L2__CH4____20220531T224613_20220601T002743_23999_02_020301_20220602T143707.nc", # ... # } # ... # }, # ... # ] # } # # save result? if True: # targefile: qfile = "query.json" # save: with open(qfile, "w") as f: f.write(r.text) # endwith # endif # convert response to json dict: data = r.json() # check ... if type(data) != dict: logging.error(f"request response should be a json dict, found type: {type(data)}") raise Exception # endif # check ... if "features" not in data.keys(): logging.error(f"element 'features' not found in response") raise Exception # endif # count: nrec = len(data["features"]) # loop over features: for feature in data["features"]: # check ... if type(feature) != dict: logging.error(f"feature should be a dict, found type: {type(feature)}") raise Exception # endif # check ... if "id" not in feature.keys(): logging.error(f"element 'id' not found in feature") raise Exception # endif # get product id: product_id = feature["id"] # check ... if "properties" not in feature.keys(): logging.error(f"element 'properties' not found in feature") raise Exception # endif # check ... if "title" not in feature["properties"].keys(): logging.error(f"element 'properties/title' not found in feature") raise Exception # endif # get full filename: filename = feature["properties"]["title"] # # S5P_OFFL_L2__NO2____20180701T005930_20180701T024100_03698_01_010002_20180707T022838.nc # plt proc [product-] [starttime....] [endtime......] orbit cl procrv [prodtime.....] # bname = os.path.basename(filename).replace(".nc", "") # split: platform_name, processing, rest = bname.split("_", 2) product_type = rest[0:10] parts = rest[11:].split("_") start_time, end_time, orbit, collection, processor_version, prod_time = parts # convert: tfmt = "%Y%m%dT%H%M%S" ts = datetime.datetime.strptime(start_time, tfmt) te = datetime.datetime.strptime(end_time, tfmt) # fill download href: href = download_url.format(product_id=product_id) # strange, sometimes records seem double ... # already records present? if len(output_df) > 0: # same href already stored? if href in output_df["href"].values: ## testing ... # logging.warning(f"ignore double product_id: {product_id}") # ignore record: continue # endif # endif # fill record, values should be lists for concatenation below: rec = { "orbit": [orbit], "start_time": [ts], "end_time": [te], "processing": [processing], "collection": [collection], "processor_version": [processor_version], "filename": [filename], "href": [href], } # add record: output_df = pandas.concat((output_df, pandas.DataFrame(rec)), ignore_index=True) # endfor features ## testing... # if ipage == 9 : # logging.warning( f"break after page {ipage} ..." ) # break ## endif # not a full page? then end is reached ... if nrec < nrow: # leave loop over pages: break # endif # increse row offset: row0 += nrow # endwhile # pages # info .. logging.info(f"{indent}save to: %s ..." % output_file) # create directory: dirname = os.path.dirname(output_file) if len(dirname) > 0: if not os.path.isdir(dirname): os.makedirs(dirname) # endif # endif # write: output_df.to_csv(output_file, sep=";", index=False) # info ... logging.info(f"{indent}") logging.info(f"{indent}** end inquire") logging.info(f"{indent}") # enddef __init__ # endclass CSO_DataSpace_Inquire ######################################################################## ### ### OpenSearch download ### ######################################################################## class NullAuth(requests.auth.AuthBase): """ Force requests to ignore the ``~/.netrc`` file. Some sites do not support regular authentication, but we still want to store credentials in the ``~/.netrc`` file and submit them as form elements. Without this, requests would otherwise use the ``~/.netrc`` which leads, on some sites, to a 401 error. Use with:: requests.get( url, auth=NullAuth() ) Source: ``_ """ def __call__(self, r): return r # enddef __call__ # endclass NullAuth # * class CSO_DataSpace_Downloader(object): """ Class to download single file from *Copernicus DataSpace*. The object defined by the class is used to store an access token that is re-used until it is expired. The :py:class:`DownloadFile` method should be used to actually download a file. Usage:: # initialize downloader: downloader = CSO_DataSpace_Downloader() # download file: downloader.DownloadFile( "https://zipper.dataspace.copernicus.eu/odata/v1/Products('d483baa0-3a61-4985-aa0c-5642a83c9214')/$value", "orbit.nc" ) """ def __init__(self): """ Initialize downloader. """ # no token yet: self.access_token = None # enddef __init__ # * def CreateToken(self, href, indent=""): """ Create access token. """ # modules: import urllib.parse import requests # number of seconds to wait in retry loop: nsec_wait = 10 # # On linux system, login/passwords for websites and ftp can be stored in "~/.netrc" file: # ---[~/.netrc]----------------------------------------------- # machine zipper.dataspace.copernicus.eu login Your.Name@institute.org password *********** # ------------------------------------------------------------ # Retrieve the login/password from ~/.netrc to avoid hardcoding them in a script. # # the "get_netrc_auth" function requires base of url as first argument, # for example: https://zipper.dataspace.copernicus.eu # extract parts from download url: p = urllib.parse.urlparse(href) url = f"{p.scheme}://{p.netloc}" # get username and password from ~/.netrc file: try: username, password = requests.utils.get_netrc_auth(url, raise_errors=True) except: logging.error(f"Could not get username and password from ~/.netrc file for url:") logging.error(f" {url}") logging.error(f"For the Copernicus DataSpace, the file should contain:") logging.error(f" machine {p.netloc} login **** password ****") raise Exception # endtry # convert into token for dataspace website following: # https://documentation.dataspace.copernicus.eu/APIs/Token.html # fill data fields: data = { "client_id": "cdse-public", "username": username, "password": password, "grant_type": "password", } # identity server: domain = "identity.dataspace.copernicus.eu" url = f"https://{domain}/auth/realms/CDSE/protocol/openid-connect/token" # retry loop .. ntry = 1 while True: # try to obtain token: try: # send request: r = requests.post(url, data=data) # check status, raise error if request failed: r.raise_for_status() # extract token from response: self.access_token = r.json()["access_token"] # all ok, leave try loop: break except requests.exceptions.HTTPError as err: # info .. msg = str(err) logging.error(f"{indent}exception from token creation; message received:") logging.error(f"{indent} {msg}") # catch known problem ... if msg.startswith("401 Client Error: Unauthorized for url:"): logging.error(f"{indent}Possible causes:") logging.error(f"{indent} * Just a random failure ...") logging.error( f"{indent} * The (login,password) pair received from your '~/.netrc' file are incorrect." ) logging.error( f"{indent} For the Copernicus DataSpace, the file should contain:" ) logging.error(f"{indent} machine {p.netloc} login **** password ****") logging.error( f"{indent} If the machine was not found, a default might have been received." ) logging.error(f"{indent} Login received: {username}") logging.error( f"{indent} * Too many logins? Try to run single processing only." ) logging.error( f"{indent} * System maintenance? Check the Copernicus DataSpace website." ) else: logging.error( f"{indent}Access token creation failed; server response: {r.json()}" ) # endif except: # info ... logging.error(f"{indent}Access token creation failed; server response: {r.json()}") # end try # increase counter: ntry += 1 # switch: if ntry == maxtry: logging.warning(f"{indent}tried {maxtry} times; exit ...") raise Exception else: logging.warning( f"{indent}exception from token creation; wait {nsec_wait} seconds ..." ) time.sleep(nsec_wait) logging.warning(f"{indent}attempt {ntry} / {maxtry} ...") continue # while-loop # endif # endwhile # retry # enddef CreateToken # * def DownloadFile(self, href, output_file, maxtry=10, nsec_wait=60, indent=""): """ Download file from DataSpace. If a request fails it is tried again up to a maximum of ``maxtry`` times, with a delay of ``nsec_wait`` between requsts. Arguments: * ``href`` : download url, for example:: https://zipper.dataspace.copernicus.eu/odata/v1/Products('d483baa0-3a61-4985-aa0c-5642a83c9214')/$value * ``output_file`` : target file Optional arguments: * ``maxtry`` : number of times to try again if download fails * ``nsec_wait`` : delay in seconds between requests """ # modules: import sys import os import time import requests import zipfile import shutil # tools: import cso_file # no token yet? if self.access_token is None: # info .. logging.info(f"{indent}create token ...") # create token, re-use until error is received ... self.CreateToken(href, indent=indent) # endif # retry loop .. ntry = 1 while True: # try to download and save: try: # fill authorization token in header: headers = {"Authorization": f"Bearer {self.access_token}"} # ensure that "~/.netrc" is ignored by passing null-authorization, # otherwise the token in the header is overwritten by a token formed # from the login/password in the rcfile if that is found: r = requests.get(href, auth=NullAuth(), headers=headers) # check status, raise error if request failed: r.raise_for_status() # product is a zip-file: product_file = "product.zip" # info .. logging.info(f"{indent}write to {product_file} ...") # write to temporary target first .. tmpfile = product_file + ".tmp" # open destination file for binary write: with open(tmpfile, "wb") as fd: # prefered way to write content following: # https://docs.python-requests.org/en/master/user/quickstart/ for chunk in r.iter_content(chunk_size=128): fd.write(chunk) # endfor # endwith # rename: os.rename(tmpfile, product_file) # open product file: arch = zipfile.ZipFile(product_file, mode="r") # loop over members, probably two files in a directory: # S5P_RPRO_L2__CH4____20200101T005246_etc/S5P_RPRO_L2__CH4____20200101T005246_etc.cdl # S5P_RPRO_L2__CH4____20200101T005246_etc.nc for member in arch.namelist(): # ncfile? if member.endswith(".nc"): # this should be the target file .. if os.path.basename(member) != os.path.basename(output_file): logging.error(f"member of archive file: {member}") logging.error(f"differs from target name: {output_file}") raise Exception # endif # info .. logging.info(f"{indent}extract {member} ...") # extract here, including leading directory: arch.extract(member) # info .. logging.info(f"{indent}store ...") # create target dir if necessary: cso_file.CheckDir(output_file) # move to destination: os.rename(member, output_file) # remove directory tree: shutil.rmtree(os.path.dirname(member)) # only one file in package; leave loop over members break # endif # endfor # members # info .. logging.info(f"{indent}remove product file ...") # remove package: os.remove(product_file) # all ok, leave retry loop: break except requests.exceptions.HTTPError as err: # info .. msg = str(err) logging.warning(f"{indent}exception from download; message received:") logging.warning(f"{indent} %s" % msg) # catch known problem ... if msg.startswith("401 Client Error: Unauthorized for url:"): logging.warning(f"{indent}renew token ...") self.CreateToken(href, indent=indent) # endif except MemoryError as err: logging.error("memory error from download; increase resources?") # quit with error: raise except Exception as err: # info .. logging.error("from download; message received:") logging.error(" %s" % str(err)) # catch known problem ... if msg.startswith("File is not a zip file"): logging.warning(f"{indent}maybe download was interrupted, try again ...") else: # quit with error: raise # endif # endtry # increase counter: ntry += 1 # switch: if ntry == maxtry: logging.warning(f"{indent}tried {maxtry} times; exit ...") raise Exception else: logging.warning(f"{indent}wait {nsec_wait} seconds ...") time.sleep(nsec_wait) logging.warning(f"{indent}attempt {ntry} / {maxtry} ...") continue # while-loop # endif # endwhile # retry # enddef DownloadFile # endclass CSO_DataSpace_Downloader ######################################################################## ### ### end ### ########################################################################