Compare commits

57 Commits
| SHA1 |
|---|
| a5534212ba |
| d661cad991 |
| 3136c4985d |
| deb7005604 |
| fe9d886499 |
| f7eb39c43c |
| 00bd39a098 |
| 5a1f43d6cd |
| a1c0ae36bb |
| 63351ceb10 |
| 65759b86f1 |
| a8233950e1 |
| 86da5c3246 |
| 9e75e0e768 |
| a73f17fc35 |
| 2fbca21917 |
| 910d933467 |
| e06f4a5a05 |
| 2906af6918 |
| dd84829b6d |
| 5b25b93090 |
| 9fd7de6124 |
| d56aaeef39 |
| b4ef228a03 |
| fc2e8b53c7 |
| 664ab1025b |
| 36378f4d6c |
| 1244655a68 |
| 70c08f47ab |
| 60ae1c96cd |
| 1ea6c85ab2 |
| 84440152eb |
| a941939493 |
| f4d2cfc3cd |
| bd1ad26693 |
| bc4d57a5ab |
| 921de3f423 |
| b287715f44 |
| eb14b7d235 |
| 3ffcf2c4db |
| 6f3d95974e |
| 4adfbb6a54 |
| bd468927f3 |
| bda00e225c |
| a0f29de47f |
| 86fb03c792 |
| 846078352b |
| f1472bf250 |
| d12e91d951 |
| 7c484e3974 |
| 7a39d5a07e |
| 9c58664770 |
| 17cbcc8e79 |
| cce0cd258d |
| c20f7c06a7 |
| 22fc9f7c07 |
| 8f1ab5518a |

LICENCE.txt (new file, 13 lines)
							@@ -0,0 +1,13 @@
Copyright 2025 Institute of Geophysics, Polish Academy of Sciences

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

gsim/lasocki_2013.py (new file, 142 lines)
							@@ -0,0 +1,142 @@
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2023 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.

"""
Module exports :class:`Lasocki2013`.
"""
import numpy as np
from scipy.constants import g

from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA

def get_magnitude_energy(mag):
    """
    Converts magnitude to energy-based magnitude term.
    """
    return 1.15 + 1.96 * mag

def get_distance_term(coeffs, repi):
    """
    Computes the distance term using the given GMPE coefficients.
    """
    R_h = np.sqrt(repi ** 2 + coeffs["c7"] ** 2)
    return np.log10(R_h)

def get_standard_deviation(coeffs, coeffs_cov, magE, repi):
    """
    Computes the standard deviation term.
    """
    Cb = np.array(list(coeffs_cov)).reshape(3,3)
    R_h = np.sqrt(repi ** 2 + coeffs["c7"] ** 2)
    X0 = np.array([1, magE[0], np.log10(R_h[0]**2)])
    variance_term = np.sqrt(X0 @ Cb @ X0 + coeffs["sigma"]**2)
    return variance_term

class Lasocki2013(GMPE):
    """
    Implement equation developed by Lasocki in "REPORT ON THE ATTENUATION
    RELATIONS OF PEAK GROUND ACCELERATION AND SPECTRAL ORDINATES OF GROUND
    MOTION FOR MINING-INDUCED SEISMIC EVENTS IN THE REGION OF THE ŻELAZNY
    MOST REPOSITORY", 2009.
    Equation coefficients provided for the random horizontal component
    """
    #: Supported tectonic region type is induced, given
    #: that the equations have been derived for the LGCD mining area
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.INDUCED

    #: Supported intensity measure types are spectral acceleration,
    #: and peak ground acceleration
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, SA}

    #: Supported intensity measure component is random horizontal
    #: :attr:`~openquake.hazardlib.const.IMC.RANDOM_HORIZONTAL`,
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RANDOM_HORIZONTAL
    # DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.VERTICAL

    #: Supported standard deviation type is total
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = {const.StdDev.TOTAL}

    #: site params are not required
    REQUIRES_SITES_PARAMETERS = set()

    #: Required rupture parameter is magnitude
    REQUIRES_RUPTURE_PARAMETERS = {'mag'}

    #: Required distance measure is epicentral distance
    #: see paragraph 'Predictor Variables', page 6.
    REQUIRES_DISTANCES = {'repi'}

    def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
        """
        Computes mean ground motion values and standard deviations for Lasocki (2013) GMPE.

        Parameters:
            repi (float): Epicentral distance (m)
            mag (float): Earthquake magnitude
            imts (list of str): List of intensity measure types (e.g., 'PHA', 'PVA')
            mean (np.array): Array to store computed mean values
            sig (np.array): Array to store computed standard deviations
        """
        # Loop through each IMT and compute values
        for i, imt in enumerate(imts):
            C = self.COEFFS[imt]
            C_cov = self.COEFFS_COV[imt]
            mag = ctx.mag
            repi = ctx.repi*1000.0 # Convert distance from km to m
            # Compute magnitude energy term
            magE = get_magnitude_energy(mag)

            # Compute GMPE terms
            mag_term = C['c1'] + C['c2'] * magE
            dist_term = C['c5'] * get_distance_term(C, repi)

            # Compute mean ground motion
            imean = mag_term + dist_term
            mean_value = np.log((10 ** imean) / g)  # Convert to natural log scale and divide by g
            mean[i] = mean_value

            # Compute standard deviation
            sigma = get_standard_deviation(C, C_cov, magE, repi)
            sig[i] = sigma

    #: coefficient table provided by report
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT  c1     c2      c5      c7    sigma     N
    pga  1.25   0.31    -1.34   558   0.196     1196
    0.6  -3.86  0.55    -0.65   183   0.242     1194
    1.0  -3.94  0.62    -0.66   308   0.262     1197
    2.0  -1.14  0.47    -1.02   741   0.235     1195
    5.0  0.99   0.31    -1.15   690   0.234     1206
    10.0 3.06   0.27    -1.65   906   0.203     1192
    20.0 2.62   0.27    -1.60   435   0.196     1191
    50.0 2.09   0.27    -1.48   375   0.204     1191
    """)

    COEFFS_COV = CoeffsTable(sa_damping=5, table="""\
    IMT  Cb00      Cb01      Cb02      Cb10      Cb11      Cb12      Cb20      Cb21      Cb22
    pga  0.005586  -0.000376 -0.000752 -0.000376 0.000103  -0.000111 -0.000752 -0.000111 0.000440
    0.6  0.007509  -0.000662 -0.000688 -0.000662 0.000161  -0.000154 -0.000688 -0.000154 0.000516
    1.0  0.009119  -0.00075  -0.000948 -0.00075  0.000189  -0.000187 -0.000948 -0.000187 0.000657
    2.0  0.008563  -0.000514 -0.001282 -0.000514 0.000147  -0.000164 -0.001282 -0.000164 0.000696
    5.0  0.008283  -0.000516 -0.001202 -0.000516 0.000145  -0.000161 -0.001202 -0.000161 0.000668
   10.0  0.006904  -0.00036  -0.001145 -0.00036  0.000108  -0.000126 -0.001145 -0.000126 0.000578
   20.0  0.005389  -0.000396 -0.000658 -0.000396 0.000104  -0.000107 -0.000658 -0.000107 0.000408
   50.0  0.005874  -0.000449 -0.000678 -0.000449 0.000114  -0.000114 -0.000678 -0.000114 0.000428
    """)
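For orientation, the sketch below walks the pga row of COEFFS and COEFFS_COV through the same steps that compute() performs: the energy-magnitude conversion, the log-distance term with the c7 constant, the conversion of the predicted log10 value to the natural log of g-normalised PGA, and the covariance-based total sigma. The magnitude and epicentral distance are made-up illustration values, not numbers from the report.

```python
# Illustrative only: mean ln(PGA/g) and total sigma for the pga row above,
# mirroring the arithmetic in compute(); mag and repi are hypothetical inputs.
import numpy as np
from scipy.constants import g

c1, c2, c5, c7, sigma = 1.25, 0.31, -1.34, 558.0, 0.196      # pga row of COEFFS
Cb = np.array([[ 0.005586, -0.000376, -0.000752],            # pga row of COEFFS_COV
               [-0.000376,  0.000103, -0.000111],
               [-0.000752, -0.000111,  0.000440]])

mag = 3.0        # hypothetical event magnitude
repi = 2000.0    # hypothetical epicentral distance in metres

magE = 1.15 + 1.96 * mag                      # get_magnitude_energy()
R_h = np.sqrt(repi**2 + c7**2)                # distance used by get_distance_term()
imean = c1 + c2 * magE + c5 * np.log10(R_h)
mean_ln = np.log((10.0 ** imean) / g)         # natural log, divided by g, as in compute()

X0 = np.array([1.0, magE, np.log10(R_h**2)])
sig_total = np.sqrt(X0 @ Cb @ X0 + sigma**2)  # get_standard_deviation()
print(mean_ln, sig_total)
```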
@@ -17,7 +17,7 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
                    then the program looks for a label of 'Mw' for magnitude in the catalog
        mc: The magnitude of completeness (Mc) of the catalog
        m_max:M_max. The magnitude distribution is estimated for the range from Mc to M_max. If no value is provided,
                    then the program sets M_max to be 3 magnitude units above the maximum magnitude value in the catalog.
                    then the program sets M_max to be 1 magnitude units above the maximum magnitude value in the catalog.
        m_kde_method: The kernel density estimator to use.
        xy_select: If True, perform an estimation of the magnitude distribution using KDE with the chosen KDE method
        grid_dim: The grid cell size (in metres) of the final ground motion product map. A smaller cell size will
@@ -45,22 +45,18 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
    """

    import sys
    from importlib.metadata import version
    import logging
    from base_logger import getDefaultLogger
    from timeit import default_timer as timer
    from math import ceil, floor
    from math import ceil, floor, isnan
    import numpy as np
    import scipy
    import obspy
    import dask
    from dask.diagnostics import ProgressBar  # use Dask progress bar
    import kalepy as kale
    import utm
    from skimage.transform import resize
    import psutil
    import openquake.engine
    import igfash
    from igfash.io import read_mat_cat, read_mat_m, read_mat_pdf, read_csv
    from igfash.io import read_mat_cat, read_mat_m, read_mat_mc, read_mat_pdf, read_csv
    from igfash.window import win_CTL, win_CNE
    import igfash.kde as kde
    from igfash.gm import compute_IMT_exceedance
@@ -70,6 +66,9 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MultipleLocator
    from matplotlib.contour import ContourSet
    import xml.etree.ElementTree as ET
    import json
    import multiprocessing as mp

    logger = getDefaultLogger('igfash')

@@ -82,15 +81,14 @@ verbose: {verbose}")
        m_range = [None]
    else:
        m_range = read_mat_m(m_file)
        m_max = m_range[-1] # take m_max from the m_file

    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # temporary hard-coded configuration
    # exclude_low_fxy = False
    exclude_low_fxy = True
    exclude_low_fxy = True # skip low probability areas of the map
    thresh_fxy = 1e-3  # minimum fxy value (location PDF) needed to do PGA estimation (to skip low probability areas); also should scale according to number of grid points

    # log user selections
@@ -103,25 +101,13 @@ verbose: {verbose}")

    # print key package version numbers
    logger.debug(f"Python version {sys.version}")
    logger.debug(f"Numpy version {np.__version__}")
    logger.debug(f"Scipy version {scipy.__version__}")
    logger.debug(f"Obspy version {obspy.__version__}")
    logger.debug(f"Openquake version {openquake.engine.__version__}")
    logger.debug(f"Igfash version {igfash.__version__}")

    # print number of cpu cores available
    ncpu = psutil.cpu_count(logical=False)
    logger.debug(f"Number of cpu cores available: {ncpu}")
    for process in psutil.process_iter():
        with process.oneshot():

            # cpu = process.cpu_percent()
            cpu = process.cpu_percent() / ncpu

            if cpu > 1:
                logger.debug(f"{process.name()}, {cpu}")

    logger.debug(f"BASELINE CPU LOAD% {psutil.cpu_percent(interval=None, percpu=True)}")
    logger.debug(f"Numpy version {version('numpy')}")
    logger.debug(f"Scipy version {version('scipy')}")
    logger.debug(f"Obspy version {version('obspy')}")
    logger.debug(f"Openquake version {version('openquake.engine')}")
    logger.debug(f"Igfash version {version('igfash')}")
    logger.debug(f"Rbeast version {version('rbeast')}")
    logger.debug(f"Dask version {version('dask')}")

    dask.config.set(scheduler='processes')

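The version logging above now goes through importlib.metadata rather than each package's `__version__` attribute; `importlib.metadata.version()` reads the installed distribution metadata, so a version can be reported without importing the package itself. A minimal sketch of the pattern (the package names are just examples):

```python
# Minimal sketch of the importlib.metadata pattern used above (Python 3.8+).
from importlib.metadata import version, PackageNotFoundError

for pkg in ("numpy", "scipy", "dask"):        # example distribution names
    try:
        print(pkg, version(pkg))              # metadata lookup, no import of the package
    except PackageNotFoundError:
        print(pkg, "is not installed")
```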
@@ -137,18 +123,20 @@ verbose: {verbose}")
            logger.info("No magnitude label of catalog specified, therefore try Mw by default")
            mag_label = 'Mw'

        # if cat_label == None:
        #   print("No magnitude label of catalog specified, therefore try 'Catalog' by default")
        #   cat_label='Catalog'

        time, mag, lat, lon, depth = read_mat_cat(catalog_file, mag_label=mag_label, catalog_label='Catalog')

        # check for null magnitude values
        m_null_idx = np.where(np.isnan(mag))[0]
        if len(m_null_idx) > 0:
            msg = f"There are null values in the magnitude column of the catalog at indices {m_null_idx}"
            logger.error(msg)
            raise Exception(msg)
        if mc != None:
            logger.info("Mc value provided by user")
            trim_to_mc = True
        elif mc_file != None:
            logger.info("Mc estimation output file provided; selecting largest Mc from the list")
            mc = read_mc(mc_file)
            mc = read_mat_mc(mc_file)
            trim_to_mc = True
        else:
            logger.info("No Mc provided; using all magnitudes from the catalog")
@@ -164,9 +152,10 @@ verbose: {verbose}")
            lat = np.delete(lat, indices)
            lon = np.delete(lon, indices)

        # if user does not provide a m_max, set m_max to 3 magnitude units above max magnitude in catalog
        # if user does not provide a m_max, set m_max to 1 magnitude unit above max magnitude in catalog
        if m_max == None:
            m_max = mag.max() + 3.0
            m_max = mag.max() + 1.0
            logger.info(f"No m_max was given. Therefore m_max is automatically set to: {m_max}")

        start = timer()

@@ -231,14 +220,15 @@ verbose: {verbose}")
        y_min = y.min()
        x_max = x.max()
        y_max = y.max()
        z_min = depth.min()
        z_max = depth.max()

        grid_x_max = int(ceil(x_max / grid_dim) * grid_dim)
        grid_x_min = int(floor(x_min / grid_dim) * grid_dim)
        grid_y_max = int(ceil(y_max / grid_dim) * grid_dim)
        grid_y_min = int(floor(y_min / grid_dim) * grid_dim)

        grid_lat_max, grid_lon_max = utm.to_latlon(grid_x_max, grid_y_max, utm_zone_number, utm_zone_letter)
        grid_lat_min, grid_lon_min = utm.to_latlon(grid_x_min, grid_y_min, utm_zone_number, utm_zone_letter)

        # rectangular grid
        nx = int((grid_x_max - grid_x_min) / grid_dim) + 1
        ny = int((grid_y_max - grid_y_min) / grid_dim) + 1
@@ -262,7 +252,7 @@ verbose: {verbose}")
        # %% compute KDE and extract PDF
        start = timer()

        if xy_win_method == "TW":
        if xy_win_method:
            logger.info("Time weighting function selected")

            x_weights = np.linspace(0, 15, len(t_windowed))
@@ -323,10 +313,10 @@ verbose: {verbose}")

    # run activity rate modeling
    lambdas = [None]
    if custom_rate != None and forecast_select:
    if custom_rate != None and forecast_select and not rate_select:
        logger.info(f"Using activity rate specified by user: {custom_rate} per {time_unit}")
        lambdas = [custom_rate]
        lambdas_perc = [1]
        lambdas = np.array([custom_rate], dtype='d')
        lambdas_perc = np.array([1], dtype='d')

    elif rate_select:
        logger.info(f"Activity rate modeling selected")
@@ -344,6 +334,12 @@ verbose: {verbose}")
        elif time_unit == 'years':
            multiplicator = 1 / 365

        # Raise an exception when time_win_duration from the user is too large relative to the catalog
        if time_win_duration/multiplicator > 0.5*(time[-1] - time[0]):
            msg = "Activity rate estimation time window must be less than half the catalog length. Use a shorter time window."
            logger.error(msg)
            raise Exception(msg)

        # Selects dates in datenum format and procceeds to forecast value
        start_date = datenum_data[-1] - (2 * time_win_duration / multiplicator)
        dates_calc = [date for date in datenum_data if start_date <= date <= datenum_data[-1]]
@@ -360,10 +356,12 @@ verbose: {verbose}")
        act_rate, bin_counts, bin_edges, out, pprs, rt, idx, u_e = calc_bins(np.array(datenum_data), time_unit,
                                                                             time_win_duration, dates_calc,
                                                                             rate_forecast, rate_unc_high, rate_unc_low,
                                                                             multiplicator, quiet=True)
                                                                             multiplicator, quiet=True, figsize=(14,9))

        # Assign probabilities
        lambdas, lambdas_perc = lambda_probs(act_rate, dates_calc, bin_edges)
        lambdas = np.array(lambdas, dtype='d')
        lambdas_perc = np.array(lambdas_perc, dtype='d')

        # print("Forecasted activity rates: ", lambdas, "events per", time_unit[:-1])
        logger.info(f"Forecasted activity rates: {lambdas} events per {time_unit} with percentages {lambdas_perc}")
@@ -372,18 +370,29 @@ verbose: {verbose}")

    if forecast_select:
        products = products_string.split()
        logger.info(
            f"Ground motion forecasting selected with ground motion model {model} and IMT products {products_string}")
        logger.info(f"Ground motion forecasting selected with ground motion model {model} and IMT products {products_string}")

        # validate m_max against the grond motion model
        models_anthro_limited = ['Lasocki2013', 'Atkinson2015', 'ConvertitoEtAl2012Geysers'] # these models require that m_max<=4.5
        if m_max > 4.5 and model in models_anthro_limited:
            if m_file is None:
                msg = f"The selected ground motion model {model} is only valid for magnitudes up to 4.5. Please select a lower maximum magnitude."
            else:
                msg = f"The selected ground motion model {model} is only valid for magnitudes up to 4.5, but the provided magnitude file includes values up to {m_max}. Please adjust the magnitude range in the file accordingly."
            logger.error(msg)
            raise Exception(msg)

        if not xy_select:
            msg = "Event location distribution modeling was not selected; cannot continue..."
            logger.error(msg)
            raise Exception(msg)
        elif m_pdf[0] == None:

        if m_pdf[0] == None:
            msg = "Magnitude distribution modeling was not selected and magnitude PDF file was not provided; cannot continue..."
            logger.error(msg)
            raise Exception(msg)
        elif lambdas[0] == None:

        if lambdas[0] == None:
            msg = "Activity rate modeling was not selected and custom activity rate was not provided; cannot continue..."
            logger.error(msg)
            raise Exception(msg)
@@ -421,7 +430,10 @@ verbose: {verbose}")
            rx_lat[i], rx_lon[i] = utm.to_latlon(x_rx[i], y_rx[i], utm_zone_number,
                                                 utm_zone_letter)  # get receiver location as lat,lon

        # experimental - compute ground motion only at grid points that have minimum probability density of thresh_fxy
        # convert distances from m to km because openquake ground motion models take input distances in kilometres
        distances = distances/1000.0

        # compute ground motion only at grid points that have minimum probability density of thresh_fxy
        if exclude_low_fxy:
            indices = list(np.where(fxy.flatten() > thresh_fxy)[0])
        else:
@@ -434,25 +446,56 @@ verbose: {verbose}")

        PGA = np.zeros(shape=(nx * ny))

        # use dask parallel computing
        start = timer()
        pbar = ProgressBar()
        pbar.register()
        # iter = range(0,len(distances))
        iter = indices
        iml_grid_raw = []  # raw ground motion grids
        for imt in products:
            logger.info(f"Estimating {imt}")

            imls = [dask.delayed(compute_IMT_exceedance)(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas,
                                                         forecast_len, lambdas_perc, m_range, m_pdf, m_cdf, model,
                                                         log_level=logging.DEBUG, imt=imt, IMT_min=0.0, IMT_max=2.0,
                                                         rx_label=i) for i in iter]
            iml = dask.compute(*imls)
            iml_grid_raw.append(list(iml))
        use_pp = True

        if use_pp:         # use dask parallel computing
            mp.set_start_method("fork", force=True)
            iter = indices
            iml_grid_raw = []  # raw ground motion grids
            for imt in products:
                logger.info(f"Estimating {imt}")

                if imt == "PGV":
                    IMT_max = 200 # search interval max for velocity (cm/s)
                else:
                    IMT_max = 2.0 # search interval max for acceleration (g)

                imls = [dask.delayed(compute_IMT_exceedance)(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas,
                                                            forecast_len, lambdas_perc, m_range, m_pdf, m_cdf, model,
                                                            log_level=logging.DEBUG, imt=imt, IMT_min=0.0, IMT_max=IMT_max, rx_label=i,
                                                            rtol=0.1, use_cython=True) for i in iter]
                iml = dask.compute(*imls)
                iml_grid_raw.append(list(iml))

        else:
            iml_grid_raw = []
            iter = indices
            for imt in products:

                if imt == "PGV":
                    IMT_max = 200 # search interval max for velocity (cm/s)
                else:
                    IMT_max = 2.0 # search interval max for acceleration (g)

                iml = []
                for i in iter:
                    iml_i = compute_IMT_exceedance(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas, forecast_len,
                                                    lambdas_perc, m_range, m_pdf, m_cdf, model, imt=imt, IMT_min = 0.0,
                                                    IMT_max = IMT_max, rx_label = i, rtol = 0.1, use_cython=True)
                    iml.append(iml_i)
                    logger.info(f"Estimated {imt} at rx {i} is {iml_i}")
                iml_grid_raw.append(iml)

        end = timer()
        logger.info(f"Ground motion exceedance computation time: {round(end - start, 1)} seconds")

        if np.isnan(iml_grid_raw).all():
            msg = "No valid ground motion intensity measures were forecasted. Try a different ground motion model."
            logger.error(msg)
            raise Exception(msg)

        # create list of one empty list for each imt
        iml_grid = [[] for _ in range(len(products))]  # final ground motion grids
        iml_grid_prep = iml_grid.copy()  # temp ground motion grids
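The exceedance loop above fans the per-receiver calls to compute_IMT_exceedance out as dask delayed tasks and gathers them with dask.compute, with a ProgressBar registered for feedback. A minimal sketch of that pattern with a toy stand-in function (the function and values are placeholders, not part of the diff):

```python
# Toy sketch of the dask.delayed fan-out/gather pattern used above.
import dask
from dask.diagnostics import ProgressBar

def slow_square(x):              # stand-in for compute_IMT_exceedance
    return x * x

if __name__ == "__main__":
    dask.config.set(scheduler="processes")   # same scheduler choice as the script
    ProgressBar().register()
    tasks = [dask.delayed(slow_square)(i) for i in range(8)]
    results = dask.compute(*tasks)           # executes the eight tasks in parallel
    print(results)
```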
@@ -475,12 +518,21 @@ verbose: {verbose}")
                dtype=np.float64)  # this reduces values to 8 decimal places
            iml_grid_tmp = np.nan_to_num(iml_grid[j])  # change nans to zeroes

            # upscale the grid
            up_factor = 4
            iml_grid_hd = resize(iml_grid_tmp, (up_factor * len(iml_grid_tmp), up_factor * len(iml_grid_tmp)),
                                 mode='reflect', anti_aliasing=False)
            # upscale the grid, trim, and interpolate if there are at least 10 grid values with range greater than 0.1
            if np.count_nonzero(iml_grid_tmp) >= 10 and vmax-vmin > 0.1:
                up_factor = 4
                iml_grid_hd = resize(iml_grid_tmp, (up_factor * len(iml_grid_tmp), up_factor * len(iml_grid_tmp)),
                                    mode='reflect', anti_aliasing=False)
                trim_thresh = vmin
                iml_grid_hd[iml_grid_hd < trim_thresh] = np.nan
            else:
                iml_grid_hd = iml_grid_tmp

            iml_grid_hd[iml_grid_hd == 0.0] = np.nan  # change zeroes back to nan

            #vmin_hd = min(x for x in iml_grid_hd.flatten() if not isnan(x))
            vmax_hd = max(x for x in iml_grid_hd.flatten() if not isnan(x))

            # generate image overlay
            north, south = lat.max(), lat.min()  # Latitude range
            east, west = lon.max(), lon.min()  # Longitude range
@@ -489,17 +541,27 @@ verbose: {verbose}")
            map_center = [np.mean([north, south]), np.mean([east, west])]

            # Create an image from the grid
            cmap_name = 'YlOrRd'
            cmap = plt.get_cmap(cmap_name)
            fig, ax = plt.subplots(figsize=(6, 6))
            ax.imshow(iml_grid_hd, origin='lower', cmap='viridis')
            ax.imshow(iml_grid_hd, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax, interpolation='bilinear')
            ax.axis('off')

            # Save the figure
            fig.canvas.draw()
            plt.savefig("overlay_" + str(j) + ".svg", bbox_inches="tight", pad_inches=0, transparent=True)
            overlay_filename = f"overlay_{j}.svg"
            plt.savefig(overlay_filename, bbox_inches="tight", pad_inches=0, transparent=True)
            plt.close(fig)

            # Embed geographic bounding box into the SVG
            map_bounds = dict(zip(("south", "west", "north", "east"),
                            map(float, (grid_lat_min, grid_lon_min, grid_lat_max, grid_lon_max))))
            tree = ET.parse(overlay_filename)
            tree.getroot().set("data-map-bounds", json.dumps(map_bounds))
            tree.write(overlay_filename, encoding="utf-8", xml_declaration=True)
            logger.info(f"Saved geographic bounds to SVG metadata (data-map-bounds): {overlay_filename} → {map_bounds}")

            # Make the color bar
            cmap_name = 'viridis'
            width = 50
            height = 500

@@ -507,16 +569,20 @@ verbose: {verbose}")
            gradient = np.vstack((gradient, gradient)).T
            gradient = np.tile(gradient, (1, width))

            colorbar_title = products[j]
            if "PGA" in colorbar_title or "SA" in colorbar_title:
                colorbar_title = colorbar_title + "  (g)"

            fig, ax = plt.subplots(figsize=((width + 40) / 100.0, (height + 20) / 100.0),
                                   dpi=100)  # Increase fig size for labels
            ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(cmap_name),
                      extent=[0, 1, vmin, vmax])  # Note: extent order is different for vertical
            ax.imshow(gradient, aspect='auto', cmap=cmap.reversed(),
                      extent=[0, 1, vmin, vmax_hd])  # Note: extent order is different for vertical
            ax.set_xticks([])  # Remove x-ticks for vertical colorbar
            num_ticks = 11  # Show more ticks
            tick_positions = np.linspace(vmin, vmax, num_ticks)
            tick_positions = np.linspace(vmin, vmax_hd, num_ticks)
            ax.set_yticks(tick_positions)
            ax.set_yticklabels([f"{tick:.2f}" for tick in tick_positions])  # format tick labels
            ax.set_title(imt, pad=15)
            ax.set_title(colorbar_title, loc='right', pad=15)
            fig.subplots_adjust(left=0.25, right=0.75, bottom=0.05, top=0.95)  # Adjust Layout
            fig.savefig("colorbar_" + str(j) + ".svg", bbox_inches='tight')
            plt.close(fig)

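Each overlay SVG written above now carries its geographic extent as a data-map-bounds attribute on the root element (a JSON object with south/west/north/east keys). A sketch of how a downstream consumer could recover those bounds; the filename is an example matching the overlay_{j}.svg naming pattern:

```python
# Sketch: read back the geographic bounds embedded in an overlay SVG above.
import json
import xml.etree.ElementTree as ET

tree = ET.parse("overlay_0.svg")                       # example overlay filename
bounds = json.loads(tree.getroot().get("data-map-bounds"))
print(bounds["south"], bounds["west"], bounds["north"], bounds["east"])
```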
@@ -23,6 +23,15 @@ from seismic_hazard_forecasting import main as shf


def main(argv):

    def str2bool(v):
        if v.lower() in ("True", "TRUE", "yes", "true", "t", "y", "1"):
            return True
        elif v.lower() in ("False", "FALSE", "no", "false", "f", "n", "0"):
            return False
        else:
            raise argparse.ArgumentTypeError("Boolean value expected.")

    parser = argparse.ArgumentParser()

    parser.add_argument("catalog_file", help="Path to input file of type 'catalog'")
@@ -30,23 +39,23 @@ def main(argv):
    parser.add_argument("--pdf_file", help="Path to input file of type 'PDF'", type=str, default=None, required=False)
    parser.add_argument("--m_file", help="Path to input file of type 'm'", type=str, default=None, required=False)

    parser.add_argument("--m_select", type=bool)
    parser.add_argument("--m_select", type=str2bool)
    parser.add_argument("--mag_label", type=str, default=None, required=False)
    parser.add_argument("--mc", type=float, default=None, required=False)
    parser.add_argument("--m_max", type=float, default=None, required=False)
    parser.add_argument("--m_kde_method", type=str)
    parser.add_argument("--xy_select", type=bool)
    parser.add_argument("--xy_select", type=str2bool)
    parser.add_argument("--grid_dim", type=int)
    parser.add_argument("--xy_win_method", type=bool)
    parser.add_argument("--rate_select", type=bool)
    parser.add_argument("--xy_win_method", type=str2bool)
    parser.add_argument("--rate_select", type=str2bool)
    parser.add_argument("--time_win_duration", type=float)
    parser.add_argument("--forecast_select", type=bool)
    parser.add_argument("--forecast_select", type=str2bool)
    parser.add_argument("--custom_rate", type=float, default=None, required=False)
    parser.add_argument("--forecast_len", type=float)
    parser.add_argument("--time_unit", type=str)
    parser.add_argument("--model", type=str)
    parser.add_argument("--products_string", type=str)
    parser.add_argument("--verbose", type=bool)
    parser.add_argument("--verbose", type=str2bool)

    args = parser.parse_args()
    shf(**vars(args))
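The switch from type=bool to type=str2bool in the argument definitions matters because argparse hands the raw command-line string to the type callable, and bool("false") is True since any non-empty string is truthy. A standalone sketch of the difference, with a trimmed copy of the converter (the --old/--new flags are illustrative only):

```python
# Why type=str2bool: bool("false") is True, so type=bool cannot parse boolean flags.
import argparse

def str2bool(v):
    # same logic as the converter added above, with the case folding simplified
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")

parser = argparse.ArgumentParser()
parser.add_argument("--old", type=bool)        # buggy: any non-empty string -> True
parser.add_argument("--new", type=str2bool)    # parses "true"/"false" correctly
args = parser.parse_args(["--old", "false", "--new", "false"])
print(args.old, args.new)                      # prints: True False
```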