Compare commits

No commits in common. "master" and "master" have entirely different histories.

3 changed files with 60 additions and 273 deletions

View File

@ -1,13 +0,0 @@
Copyright 2025 Institute of Geophysics, Polish Academy of Sciences
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,142 +0,0 @@
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2023 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`Lasocki2013`.
"""
import numpy as np
from scipy.constants import g

from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA


def get_magnitude_energy(mag):
    """
    Converts magnitude to the energy-based magnitude term.
    """
    return 1.15 + 1.96 * mag


def get_distance_term(coeffs, repi):
    """
    Computes the distance term using the given GMPE coefficients.
    """
    # effective distance: epicentral distance combined with the
    # depth-like coefficient c7
    R_h = np.sqrt(repi ** 2 + coeffs["c7"] ** 2)
    return np.log10(R_h)


def get_standard_deviation(coeffs, coeffs_cov, magE, repi):
    """
    Computes the total standard deviation, combining the model variance
    with the variance contributed by the coefficient estimates.
    """
    # 3x3 covariance matrix of the regression coefficients
    Cb = np.array(list(coeffs_cov)).reshape(3, 3)
    R_h = np.sqrt(repi ** 2 + coeffs["c7"] ** 2)
    # design vector of the regression: [1, magnitude-energy, log10(R_h**2)]
    X0 = np.array([1, magE[0], np.log10(R_h[0] ** 2)])
    variance_term = np.sqrt(X0 @ Cb @ X0 + coeffs["sigma"] ** 2)
    return variance_term


class Lasocki2013(GMPE):
    """
    Implements the equation developed by Lasocki in "REPORT ON THE ATTENUATION
    RELATIONS OF PEAK GROUND ACCELERATION AND SPECTRAL ORDINATES OF GROUND
    MOTION FOR MINING-INDUCED SEISMIC EVENTS IN THE REGION OF THE ŻELAZNY
    MOST REPOSITORY", 2009.

    Equation coefficients are provided for the random horizontal component.
    """
    #: Supported tectonic region type is induced, given
    #: that the equations have been derived for the LGCD mining area
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.INDUCED

    #: Supported intensity measure types are spectral acceleration
    #: and peak ground acceleration
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, SA}

    #: Supported intensity measure component is random horizontal
    #: :attr:`~openquake.hazardlib.const.IMC.RANDOM_HORIZONTAL`
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RANDOM_HORIZONTAL
    # DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.VERTICAL

    #: Supported standard deviation type is total
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = {const.StdDev.TOTAL}

    #: Site parameters are not required
    REQUIRES_SITES_PARAMETERS = set()

    #: Required rupture parameter is magnitude
    REQUIRES_RUPTURE_PARAMETERS = {'mag'}

    #: Required distance measure is epicentral distance,
    #: see paragraph 'Predictor Variables', page 6.
    REQUIRES_DISTANCES = {'repi'}

    def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
        """
        Computes mean ground motion values and standard deviations for the
        Lasocki (2013) GMPE.

        Parameters:
            ctx (np.recarray): Rupture contexts providing magnitude `mag`
                and epicentral distance `repi` (km)
            imts (list): Intensity measure types to compute (e.g. PGA, SA)
            mean (np.array): Array to store computed mean values
            sig (np.array): Array to store computed standard deviations
        """
        # Loop through each IMT and compute values
        for i, imt in enumerate(imts):
            C = self.COEFFS[imt]
            C_cov = self.COEFFS_COV[imt]
            mag = ctx.mag
            repi = ctx.repi * 1000.0  # convert distance from km to m

            # Compute magnitude-energy term
            magE = get_magnitude_energy(mag)

            # Compute GMPE terms
            mag_term = C['c1'] + C['c2'] * magE
            dist_term = C['c5'] * get_distance_term(C, repi)

            # Compute mean ground motion: the prediction is a log10 value,
            # so convert to natural log and express it in units of g
            imean = mag_term + dist_term
            mean[i] = np.log((10 ** imean) / g)

            # Compute standard deviation
            sig[i] = get_standard_deviation(C, C_cov, magE, repi)

    #: Coefficient table provided by the report
    COEFFS = CoeffsTable(sa_damping=5, table="""\
    IMT    c1     c2    c5     c7   sigma  N
    pga    1.25   0.31  -1.34  558  0.196  1196
    0.6   -3.86   0.55  -0.65  183  0.242  1194
    1.0   -3.94   0.62  -0.66  308  0.262  1197
    2.0   -1.14   0.47  -1.02  741  0.235  1195
    5.0    0.99   0.31  -1.15  690  0.234  1206
    10.0   3.06   0.27  -1.65  906  0.203  1192
    20.0   2.62   0.27  -1.60  435  0.196  1191
    50.0   2.09   0.27  -1.48  375  0.204  1191
    """)

    COEFFS_COV = CoeffsTable(sa_damping=5, table="""\
    IMT   Cb00      Cb01       Cb02       Cb10       Cb11      Cb12       Cb20       Cb21       Cb22
    pga   0.005586  -0.000376  -0.000752  -0.000376  0.000103  -0.000111  -0.000752  -0.000111  0.000440
    0.6   0.007509  -0.000662  -0.000688  -0.000662  0.000161  -0.000154  -0.000688  -0.000154  0.000516
    1.0   0.009119  -0.00075   -0.000948  -0.00075   0.000189  -0.000187  -0.000948  -0.000187  0.000657
    2.0   0.008563  -0.000514  -0.001282  -0.000514  0.000147  -0.000164  -0.001282  -0.000164  0.000696
    5.0   0.008283  -0.000516  -0.001202  -0.000516  0.000145  -0.000161  -0.001202  -0.000161  0.000668
    10.0  0.006904  -0.00036   -0.001145  -0.00036   0.000108  -0.000126  -0.001145  -0.000126  0.000578
    20.0  0.005389  -0.000396  -0.000658  -0.000396  0.000104  -0.000107  -0.000658  -0.000107  0.000408
    50.0  0.005874  -0.000449  -0.000678  -0.000449  0.000114  -0.000114  -0.000678  -0.000114  0.000428
    """)

View File

@ -17,7 +17,7 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
then the program looks for a label of 'Mw' for magnitude in the catalog
mc: The magnitude of completeness (Mc) of the catalog
m_max: M_max. The magnitude distribution is estimated for the range from Mc to M_max. If no value is provided,
then the program sets M_max to be 1 magnitude unit above the maximum magnitude value in the catalog.
then the program sets M_max to be 3 magnitude units above the maximum magnitude value in the catalog.
m_kde_method: The kernel density estimator to use.
xy_select: If True, perform an estimation of the event location distribution using KDE with the chosen KDE method
grid_dim: The grid cell size (in metres) of the final ground motion product map. A smaller cell size will
@ -45,19 +45,22 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
"""
import sys
from importlib.metadata import version
import logging
from base_logger import getDefaultLogger
from timeit import default_timer as timer
from math import ceil, floor, isnan
from math import ceil, floor
import numpy as np
import scipy
import obspy
import dask
from dask.diagnostics import ProgressBar # use Dask progress bar
import kalepy as kale
import utm
from skimage.transform import resize
import psutil
import openquake.engine
import igfash
from igfash.io import read_mat_cat, read_mat_m, read_mat_mc, read_mat_pdf, read_csv
from igfash.io import read_mat_cat, read_mat_m, read_mat_pdf, read_csv
from igfash.window import win_CTL, win_CNE
import igfash.kde as kde
from igfash.gm import compute_IMT_exceedance
@ -67,8 +70,6 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.contour import ContourSet
import xml.etree.ElementTree as ET
import json
logger = getDefaultLogger('igfash')
@ -81,7 +82,6 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
m_range = [None]
else:
m_range = read_mat_m(m_file)
m_max = m_range[-1] # take m_max from the m_file
if verbose:
logger.setLevel(logging.DEBUG)
@ -103,13 +103,25 @@ verbose: {verbose}")
# print key package version numbers
logger.debug(f"Python version {sys.version}")
logger.debug(f"Numpy version {version('numpy')}")
logger.debug(f"Scipy version {version('scipy')}")
logger.debug(f"Obspy version {version('obspy')}")
logger.debug(f"Openquake version {version('openquake.engine')}")
logger.debug(f"Igfash version {version('igfash')}")
logger.debug(f"Rbeast version {version('rbeast')}")
logger.debug(f"Dask version {version('dask')}")
logger.debug(f"Numpy version {np.__version__}")
logger.debug(f"Scipy version {scipy.__version__}")
logger.debug(f"Obspy version {obspy.__version__}")
logger.debug(f"Openquake version {openquake.engine.__version__}")
logger.debug(f"Igfash version {igfash.__version__}")
# print number of cpu cores available
ncpu = psutil.cpu_count(logical=False)
logger.debug(f"Number of cpu cores available: {ncpu}")
for process in psutil.process_iter():
with process.oneshot():
# cpu = process.cpu_percent()
cpu = process.cpu_percent() / ncpu
if cpu > 1:
logger.debug(f"{process.name()}, {cpu}")
logger.debug(f"BASELINE CPU LOAD% {psutil.cpu_percent(interval=None, percpu=True)}")
dask.config.set(scheduler='processes')
@ -131,18 +143,12 @@ verbose: {verbose}")
time, mag, lat, lon, depth = read_mat_cat(catalog_file, mag_label=mag_label, catalog_label='Catalog')
# check for null magnitude values
m_null_idx = np.where(np.isnan(mag))[0]
if len(m_null_idx) > 0:
msg = f"There are null values in the magnitude column of the catalog at indices {m_null_idx}"
logger.error(msg)
raise Exception(msg)
if mc != None:
logger.info("Mc value provided by user")
trim_to_mc = True
elif mc_file != None:
logger.info("Mc estimation output file provided; selecting largest Mc from the list")
mc = read_mat_mc(mc_file)
mc = read_mc(mc_file)
trim_to_mc = True
else:
logger.info("No Mc provided; using all magnitudes from the catalog")
@ -158,10 +164,9 @@ verbose: {verbose}")
lat = np.delete(lat, indices)
lon = np.delete(lon, indices)
# if user does not provide a m_max, set m_max to 1 magnitude unit above max magnitude in catalog
# if user does not provide a m_max, set m_max to 3 magnitude units above max magnitude in catalog
if m_max == None:
m_max = mag.max() + 1.0
logger.info(f"No m_max was given. Therefore m_max is automatically set to: {m_max}")
m_max = mag.max() + 3.0
start = timer()
@ -226,15 +231,14 @@ verbose: {verbose}")
y_min = y.min()
x_max = x.max()
y_max = y.max()
z_min = depth.min()
z_max = depth.max()
grid_x_max = int(ceil(x_max / grid_dim) * grid_dim)
grid_x_min = int(floor(x_min / grid_dim) * grid_dim)
grid_y_max = int(ceil(y_max / grid_dim) * grid_dim)
grid_y_min = int(floor(y_min / grid_dim) * grid_dim)
grid_lat_max, grid_lon_max = utm.to_latlon(grid_x_max, grid_y_max, utm_zone_number, utm_zone_letter)
grid_lat_min, grid_lon_min = utm.to_latlon(grid_x_min, grid_y_min, utm_zone_number, utm_zone_letter)
# rectangular grid
nx = int((grid_x_max - grid_x_min) / grid_dim) + 1
ny = int((grid_y_max - grid_y_min) / grid_dim) + 1
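A quick illustration (values assumed, not from the diff) of the snapping arithmetic above: the grid bounds round outward to whole multiples of grid_dim before the corners go back through utm:

    from math import ceil, floor
    import utm

    grid_dim = 500                        # cell size in metres
    x_min, x_max = 431218.7, 447903.2     # illustrative UTM eastings
    grid_x_min = int(floor(x_min / grid_dim) * grid_dim)   # 431000
    grid_x_max = int(ceil(x_max / grid_dim) * grid_dim)    # 448000
    nx = int((grid_x_max - grid_x_min) / grid_dim) + 1     # 35 grid nodes

    # a snapped corner back to lat/lon; zone number and letter are placeholders
    lat_max, lon_max = utm.to_latlon(grid_x_max, 5650000, 34, 'U')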
@ -321,8 +325,8 @@ verbose: {verbose}")
lambdas = [None]
if custom_rate != None and forecast_select:
logger.info(f"Using activity rate specified by user: {custom_rate} per {time_unit}")
lambdas = np.array([custom_rate], dtype='d')
lambdas_perc = np.array([1], dtype='d')
lambdas = [custom_rate]
lambdas_perc = [1]
elif rate_select:
logger.info(f"Activity rate modeling selected")
@ -340,12 +344,6 @@ verbose: {verbose}")
elif time_unit == 'years':
multiplicator = 1 / 365
# Raise an exception when time_win_duration from the user is too large relative to the catalog
if time_win_duration/multiplicator > 0.5*(time[-1] - time[0]):
msg = "Activity rate estimation time window must be less than half the catalog length. Use a shorter time window."
logger.error(msg)
raise Exception(msg)
# Select dates in datenum format and proceed to compute the forecast value
start_date = datenum_data[-1] - (2 * time_win_duration / multiplicator)
dates_calc = [date for date in datenum_data if start_date <= date <= datenum_data[-1]]
@ -362,7 +360,7 @@ verbose: {verbose}")
act_rate, bin_counts, bin_edges, out, pprs, rt, idx, u_e = calc_bins(np.array(datenum_data), time_unit,
time_win_duration, dates_calc,
rate_forecast, rate_unc_high, rate_unc_low,
multiplicator, quiet=True, figsize=(14,9))
multiplicator, quiet=True)
# Assign probabilities
lambdas, lambdas_perc = lambda_probs(act_rate, dates_calc, bin_edges)
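lambda_probs pairs candidate activity rates with probabilities; a plausible sketch (an assumption about the downstream combination, not code from igfash) of how such weighted rates would feed a Poisson exceedance probability over the forecast window:

    import numpy as np

    def poisson_exceedance(lambdas, weights, p_event, forecast_len):
        """Weighted mixture of Poisson exceedance terms (assumed form)."""
        lam = np.asarray(lambdas, dtype=float)
        w = np.asarray(weights, dtype=float)
        # probability of at least one exceedance under each candidate rate
        p_each = 1.0 - np.exp(-lam * p_event * forecast_len)
        return float(np.sum(w * p_each))

    # two candidate rates weighted 70/30, 1% per-event exceedance, 30-day window
    print(poisson_exceedance([0.8, 1.5], [0.7, 0.3], 0.01, 30.0))  # ~0.258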
@ -374,29 +372,18 @@ verbose: {verbose}")
if forecast_select:
products = products_string.split()
logger.info(f"Ground motion forecasting selected with ground motion model {model} and IMT products {products_string}")
# validate m_max against the ground motion model
models_anthro_limited = ['Lasocki2013', 'Atkinson2015', 'ConvertitoEtAl2012Geysers'] # these models require that m_max<=4.5
if m_max > 4.5 and model in models_anthro_limited:
if m_file is None:
msg = f"The selected ground motion model {model} is only valid for magnitudes up to 4.5. Please select a lower maximum magnitude."
else:
msg = f"The selected ground motion model {model} is only valid for magnitudes up to 4.5, but the provided magnitude file includes values up to {m_max}. Please adjust the magnitude range in the file accordingly."
logger.error(msg)
raise Exception(msg)
logger.info(
f"Ground motion forecasting selected with ground motion model {model} and IMT products {products_string}")
if not xy_select:
msg = "Event location distribution modeling was not selected; cannot continue..."
logger.error(msg)
raise Exception(msg)
if m_pdf[0] == None:
elif m_pdf[0] == None:
msg = "Magnitude distribution modeling was not selected and magnitude PDF file was not provided; cannot continue..."
logger.error(msg)
raise Exception(msg)
if lambdas[0] == None:
elif lambdas[0] == None:
msg = "Activity rate modeling was not selected and custom activity rate was not provided; cannot continue..."
logger.error(msg)
raise Exception(msg)
@ -434,10 +421,7 @@ verbose: {verbose}")
rx_lat[i], rx_lon[i] = utm.to_latlon(x_rx[i], y_rx[i], utm_zone_number,
utm_zone_letter) # get receiver location as lat,lon
# convert distances from m to km because openquake ground motion models take input distances in kilometres
distances = distances/1000.0
# compute ground motion only at grid points that have minimum probability density of thresh_fxy
# experimental - compute ground motion only at grid points that have minimum probability density of thresh_fxy
if exclude_low_fxy:
indices = list(np.where(fxy.flatten() > thresh_fxy)[0])
else:
@ -450,47 +434,25 @@ verbose: {verbose}")
PGA = np.zeros(shape=(nx * ny))
# use dask parallel computing
start = timer()
pbar = ProgressBar()
pbar.register()
# iter = range(0,len(distances))
iter = indices
iml_grid_raw = [] # raw ground motion grids
for imt in products:
logger.info(f"Estimating {imt}")
use_pp = False
if use_pp: # use dask parallel computing
pbar = ProgressBar()
pbar.register()
# iter = range(0,len(distances))
iter = indices
iml_grid_raw = [] # raw ground motion grids
for imt in products:
logger.info(f"Estimating {imt}")
imls = [dask.delayed(compute_IMT_exceedance)(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas,
forecast_len, lambdas_perc, m_range, m_pdf, m_cdf, model,
log_level=logging.DEBUG, imt=imt, IMT_min=0.0, IMT_max=2.0, rx_label=i,
rtol=0.1, use_cython=True) for i in iter]
iml = dask.compute(*imls)
iml_grid_raw.append(list(iml))
else:
iml_grid_raw = []
iter = indices
for imt in products:
iml = []
for i in iter:
iml_i = compute_IMT_exceedance(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas, forecast_len,
lambdas_perc, m_range, m_pdf, m_cdf, model, imt=imt, IMT_min = 0.0,
IMT_max = 2.0, rx_label = i, rtol = 0.1, use_cython=True)
iml.append(iml_i)
logger.info(f"Estimated {imt} at rx {i} is {iml_i}")
iml_grid_raw.append(iml)
imls = [dask.delayed(compute_IMT_exceedance)(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas,
forecast_len, lambdas_perc, m_range, m_pdf, m_cdf, model,
log_level=logging.DEBUG, imt=imt, IMT_min=0.0, IMT_max=2.0,
rx_label=i) for i in iter]
iml = dask.compute(*imls)
iml_grid_raw.append(list(iml))
end = timer()
logger.info(f"Ground motion exceedance computation time: {round(end - start, 1)} seconds")
if np.isnan(iml_grid_raw).all():
msg = "No valid ground motion intensity measures were forecasted. Try a different ground motion model."
logger.error(msg)
raise Exception(msg)
# create list of one empty list for each imt
iml_grid = [[] for _ in range(len(products))] # final ground motion grids
iml_grid_prep = iml_grid.copy() # temp ground motion grids
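When use_pp is enabled, the loop above builds one lazy task per grid point and computes the whole batch at once; a self-contained sketch of that dask.delayed pattern, with a stand-in worker in place of compute_IMT_exceedance:

    import dask
    from dask.diagnostics import ProgressBar

    def slow_square(x):               # stand-in for compute_IMT_exceedance
        return x * x

    if __name__ == "__main__":
        dask.config.set(scheduler='processes')   # as in the script
        ProgressBar().register()                 # progress reporting
        tasks = [dask.delayed(slow_square)(i) for i in range(8)]  # build lazily
        results = dask.compute(*tasks)           # run the batch in parallel
        print(results)                           # (0, 1, 4, 9, 16, 25, 36, 49)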
@ -519,12 +481,6 @@ verbose: {verbose}")
mode='reflect', anti_aliasing=False)
iml_grid_hd[iml_grid_hd == 0.0] = np.nan # change zeroes back to nan
# trim edges so the grid is not so blocky
vmin_hd = min(x for x in iml_grid_hd.flatten() if not isnan(x))
vmax_hd = max(x for x in iml_grid_hd.flatten() if not isnan(x))
trim_thresh = vmin
iml_grid_hd[iml_grid_hd < trim_thresh] = np.nan
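A minimal sketch of the upscaling step above (illustrative values): skimage's resize interpolates the coarse grid onto a finer one, after which zero cells, which stand for no data, are set back to nan as in the script:

    import numpy as np
    from skimage.transform import resize

    grid = np.array([[0.0, 0.2], [0.4, 0.8]])   # coarse 2x2 grid
    hd = resize(grid, (20, 20), mode='reflect', anti_aliasing=False)
    hd[hd == 0.0] = np.nan                      # zeros (no data) back to nan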
# generate image overlay
north, south = lat.max(), lat.min() # Latitude range
east, west = lon.max(), lon.min() # Longitude range
@ -533,27 +489,17 @@ verbose: {verbose}")
map_center = [np.mean([north, south]), np.mean([east, west])]
# Create an image from the grid
cmap_name = 'YlOrRd'
cmap = plt.get_cmap(cmap_name)
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(iml_grid_hd, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
ax.imshow(iml_grid_hd, origin='lower', cmap='viridis')
ax.axis('off')
# Save the figure
fig.canvas.draw()
overlay_filename = f"overlay_{j}.svg"
plt.savefig(overlay_filename, bbox_inches="tight", pad_inches=0, transparent=True)
plt.savefig("overlay_" + str(j) + ".svg", bbox_inches="tight", pad_inches=0, transparent=True)
plt.close(fig)
# Embed geographic bounding box into the SVG
map_bounds = dict(zip(("south", "west", "north", "east"),
map(float, (grid_lat_min, grid_lon_min, grid_lat_max, grid_lon_max))))
tree = ET.parse(overlay_filename)
tree.getroot().set("data-map-bounds", json.dumps(map_bounds))
tree.write(overlay_filename, encoding="utf-8", xml_declaration=True)
logger.info(f"Saved geographic bounds to SVG metadata (data-map-bounds): {overlay_filename} → {map_bounds}")
# Make the color bar
cmap_name = 'viridis'
width = 50
height = 500
@ -561,20 +507,16 @@ verbose: {verbose}")
gradient = np.vstack((gradient, gradient)).T
gradient = np.tile(gradient, (1, width))
colorbar_title = products[j]
if "PGA" in colorbar_title or "SA" in colorbar_title:
colorbar_title = colorbar_title + " (g)"
fig, ax = plt.subplots(figsize=((width + 40) / 100.0, (height + 20) / 100.0),
dpi=100) # Increase fig size for labels
ax.imshow(gradient, aspect='auto', cmap=cmap.reversed(),
extent=[0, 1, vmin, vmax_hd]) # Note: extent order is different for vertical
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(cmap_name),
extent=[0, 1, vmin, vmax]) # Note: extent order is different for vertical
ax.set_xticks([]) # Remove x-ticks for vertical colorbar
num_ticks = 11 # Show more ticks
tick_positions = np.linspace(vmin, vmax_hd, num_ticks)
tick_positions = np.linspace(vmin, vmax, num_ticks)
ax.set_yticks(tick_positions)
ax.set_yticklabels([f"{tick:.2f}" for tick in tick_positions]) # format tick labels
ax.set_title(colorbar_title, loc='right', pad=15)
ax.set_title(imt, pad=15)
fig.subplots_adjust(left=0.25, right=0.75, bottom=0.05, top=0.95) # Adjust Layout
fig.savefig("colorbar_" + str(j) + ".svg", bbox_inches='tight')
plt.close(fig)
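Since this diff drops the SVG metadata embedding, a consumer of the old overlays would have read the bounds back roughly like this (sketch; the filename is illustrative):

    import json
    import xml.etree.ElementTree as ET

    tree = ET.parse("overlay_0.svg")             # illustrative filename
    bounds = json.loads(tree.getroot().get("data-map-bounds"))
    print(bounds["south"], bounds["west"], bounds["north"], bounds["east"])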