TNO Intern

Commit dd824c4e authored by Arjo Segers's avatar Arjo Segers
Browse files

Reformatted with `black`.

parent e26ea414
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -8,6 +8,9 @@
# 2023, Lewis Blake, Arjo Segers
#   Formatted using "black".
#
# 2025-01, Arjo Segers
#   Added support for ColHub mirror.
#


# -------------------------------------------------
@@ -65,6 +68,8 @@ and are defined according to the following hierarchy:
  * :py:class:`.CSO_Inquire_Plot`
  * :py:class:`.CSO_DataSpace_Inquire`
  * :py:class:`.CSO_PAL_Inquire`
  * :py:class:`.CSO_ColHubMirror_Inquire`
  * :py:class:`.CSO_ColHubMirror_Missing`
  * :py:class:`.CSO_S5p_Convert`
  * :py:class:`.CSO_S5p_Listing`
  * :py:class:`.CSO_GriddedAverage`
+85 −80
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@
# 2023-08, Arjo Segers
#   Reformatted using 'black'.
#
# 2035-01, Arjo Segers
# 2025-01, Arjo Segers
#   Extracted parts from former "cso_scihub.py" module to support "colhub",
#   the Norwegian mirror site.
#
@@ -126,7 +126,6 @@ class CSO_ColHubMirror_Inquire(utopya.UtopyaRc):
    """

    def __init__(self, rcfile, rcbase="", env={}, indent=""):

        """
        Scan file archive.
        """
@@ -134,6 +133,7 @@ class CSO_ColHubMirror_Inquire(utopya.UtopyaRc):
        # modules:
        import os
        import datetime

        # import glob
        import collections
        import fnmatch
@@ -177,12 +177,10 @@ class CSO_ColHubMirror_Inquire(utopya.UtopyaRc):

            # recursively search for files:
            for root, dirs, files in os.walk(archive_dir):

                # loop over files:
                for fname in files:
                    # data file?
                    if fnmatch.fnmatch(fname, fpattern):

                        # already in table?
                        if fname in listing:
                            # info ...
@@ -221,9 +219,13 @@ class CSO_ColHubMirror_Inquire(utopya.UtopyaRc):
                                    prod_time,
                                ) = rest.split("_")
                            except:
                                logging.error(f"could not extract filename parts; expected format:")
                                logging.error(f"  S5P_RPRO_L2__CH4____20180430T001851_"
                                              +"20180430T020219_02818_01_010301_20190513T141133" )
                                logging.error(
                                    f"could not extract filename parts; expected format:"
                                )
                                logging.error(
                                    f"  S5P_RPRO_L2__CH4____20180430T001851_"
                                    + "20180430T020219_02818_01_010301_20190513T141133"
                                )
                                logging.error(f"found:")
                                logging.error(f"  {bname}")
                                raise
@@ -277,8 +279,6 @@ class CSO_ColHubMirror_Inquire(utopya.UtopyaRc):
# endclass CSO_ColHubMirror_Inquire




########################################################################
###
### create listing files missing in archive
@@ -338,7 +338,6 @@ class CSO_ColHubMirror_Missing(utopya.UtopyaRc):
    """

    def __init__(self, rcfile, rcbase="", env={}, indent=""):

        """
        Convert data.
        """
@@ -346,6 +345,7 @@ class CSO_ColHubMirror_Missing(utopya.UtopyaRc):
        # modules:
        import os
        import datetime

        # import glob
        import collections
        import fnmatch
@@ -380,7 +380,9 @@ class CSO_ColHubMirror_Missing(utopya.UtopyaRc):
            # table with all available files:
            listfile_all = self.GetSetting("all.file")
            # evaluate time?
            filedate = self.GetSetting( "all.filedate", totype="datetime", default=datetime.datetime.now() )
            filedate = self.GetSetting(
                "all.filedate", totype="datetime", default=datetime.datetime.now()
            )
            listfile_all = filedate.strftime(listfile_all)
            # read:
            listing_all = cso_file.CSO_Listing(listfile_all)
@@ -388,7 +390,9 @@ class CSO_ColHubMirror_Missing(utopya.UtopyaRc):
            # table with currently already available files:
            listfile_curr = self.GetSetting("curr.file")
            # evaluate time?
            filedate = self.GetSetting( "curr.filedate", totype="datetime", default=datetime.datetime.now() )
            filedate = self.GetSetting(
                "curr.filedate", totype="datetime", default=datetime.datetime.now()
            )
            listfile_curr = filedate.strftime(listfile_curr)
            # read:
            listing_curr = cso_file.CSO_Listing(listfile_curr)
@@ -418,7 +422,8 @@ class CSO_ColHubMirror_Missing(utopya.UtopyaRc):
                # returns None if no records are found:
                xlst = listing_all.Select(orbit=orbit, expr=selection_expr, indent="    ")
                # nothing selected?
                if len(xlst) == 0 : continue
                if len(xlst) == 0:
                    continue

                # loop over selected records:
                for irec in range(len(xlst)):
+26 −18
Original line number Diff line number Diff line
@@ -400,7 +400,9 @@ class CSO_DataSpace_Inquire(utopya.UtopyaRc):

                # check ...
                if type(data) != dict:
                    logging.error(f"request response should be a json dict, found type: {type(data)}")
                    logging.error(
                        f"request response should be a json dict, found type: {type(data)}"
                    )
                    raise Exception
                # endif
                # check ...
@@ -471,7 +473,9 @@ class CSO_DataSpace_Inquire(utopya.UtopyaRc):
                        # same href already stored?
                        if href in output_df["href"].values:
                            # testing ...
                            logging.warning(f"{indent}      ignore double product_id: {product_id}")
                            logging.warning(
                                f"{indent}      ignore double product_id: {product_id}"
                            )
                            # ignore record:
                            continue
                        # endif
@@ -489,7 +493,9 @@ class CSO_DataSpace_Inquire(utopya.UtopyaRc):
                        "href": [href],
                    }
                    # add record:
                    output_df = pandas.concat((output_df, pandas.DataFrame(rec)), ignore_index=True)
                    output_df = pandas.concat(
                        (output_df, pandas.DataFrame(rec)), ignore_index=True
                    )

                # endfor features

@@ -725,7 +731,9 @@ class CSO_DataSpace_Downloader(object):

    # *

    def DownloadFile(self, href, output_file, maxtry=10, nsec_wait=5, nsec_wait_max=600, indent=""):
    def DownloadFile(
        self, href, output_file, maxtry=10, nsec_wait=5, nsec_wait_max=600, indent=""
    ):
        """
        Download file from DataSpace.

+43 −44
Original line number Diff line number Diff line
@@ -1234,7 +1234,6 @@ class CSO_Listing(object):

        # read?
        if filename is not None:

            # check ..
            if not os.path.isfile(filename):
                logging.error("listing file not found: %s" % filename)
@@ -1646,7 +1645,6 @@ class CSO_Listing(object):
    # *

    def Sort(self, by="filename"):
    
        """
        Sort listing table by filename or other key.
        """
@@ -1656,6 +1654,7 @@ class CSO_Listing(object):

    # endef Sort


# endclass CSO_Listing


+4 −4
Original line number Diff line number Diff line
@@ -241,7 +241,7 @@ class CSO_Inquire_Plot(utopya.UtopyaRc):
            collections.sort()

            # adhoc: skip test collections "90", "91", ..
            ii, = numpy.where( collections < "90" )
            (ii,) = numpy.where(collections < "90")
            if len(ii) > 0:
                collections = collections[ii]
            else:
Loading