7 Commits

SHA1 Message Date
fe9d886499 interpolation is always used on the final grid 2025-09-12 10:37:03 +02:00
f7eb39c43c add final image smoothing through bilinear interpolation 2025-09-10 18:39:43 +02:00
00bd39a098 impose a minimum range of output data required to do image processing 2025-09-10 16:33:11 +02:00
5a1f43d6cd enforce: user must have "activity rate estimation" unselected for custom rate to be used
Previously, the user could enter a value into the custom rate box and then enable "activity rate estimation": the custom rate box would disappear, but the program would still see the previously entered value and use it even though it was no longer visible in the user interface.
2025-09-10 12:00:50 +02:00
a1c0ae36bb set a minimum number of computed grid values to trigger upscaling of grid image 2025-09-09 14:41:02 +02:00
63351ceb10 fix weighting option selection 2025-09-09 11:03:05 +02:00
65759b86f1 change search interval for PGV to be different than that for PGA/SA 2025-09-09 10:56:35 +02:00
2 changed files with 34 additions and 236 deletions

View File

@@ -1,205 +1,5 @@
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import t, norm
from scipy.optimize import root_scalar
from timeit import default_timer as timer
import logging
logger = logging.getLogger(__name__)
import mpmath
from openquake.hazardlib.geo import Point #This class represents a geographical point in terms of longitude, latitude, and depth (with respect to the Earth surface).
from openquake.hazardlib.geo.surface.planar import PlanarSurface
from openquake.hazardlib.source.characteristic import CharacteristicFaultSource
from openquake.hazardlib.mfd import ArbitraryMFD
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.scalerel import WC1994 #Wells and Coppersmith magnitude – rupture area relationships
from openquake.hazardlib.site import Site, SiteCollection
from openquake.hazardlib.contexts import ContextMaker
from openquake.hazardlib.valid import gsim
from openquake.hazardlib.imt import PGA
import sys
def my_trivial_task(x):
"""
A function that does a trivial task
"""
return x * x
def compute_IMT_exceedance(rx_lat, rx_lon, r, fr, p, lambdas, D, percentages_D, magnitudes, magnitude_pdf, magnitude_cdf, model, imt='PGA', IMT_min=0.01, IMT_max=2.0, rx_label=None, rtol=0.1, use_cython=False, **kwargs):
n_events = len(r)
try:
gmpes = [gsim(model)]
except Exception:
msg = f"{model} was not found in the openquake gsim directory"
logger.error(msg)
raise Exception(msg)
if model == 'Lasocki2013': #this model requires the number of earthquake records
if imt=='PGA': #extract number of records for PGA
num_ground_motion_records = gmpes[0].COEFFS.non_sa_coeffs[PGA()]['N']
else: #extract number of records for SA()
freq = float(imt[imt.find('(')+1:imt.find(')')]) # get the desired frequency of SA
first_index = np.where(gmpes[0].COEFFS.get_coeffs('N')[0]==freq)[0][0]
num_ground_motion_records = gmpes[0].COEFFS.get_coeffs('N')[1][first_index][0]
else:
num_ground_motion_records = 0
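# NOTE: this placeholder is only read by the 'Lasocki2013' branch further down
# (Student-t exceedance with N - 3 degrees of freedom); the normal-distribution
# branch ignores it, so 0 is safe here.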
#placeholder values that do not have any effect
Mag = 5.0 #placeholder mag, must be valid for that context; will be overwritten in loop
rupture_aratio = 1.5
Strike = 0
Dip = 90
Rake = 0
Hypocenter = Point(rx_lon, rx_lat, 0.0) #does not matter in our case; just set eq location to be same as receiver
#according to the magnitude and MSR calculate planar surface
planar_surface = PlanarSurface.from_hypocenter(
hypoc=Hypocenter,
msr=WC1994(),
mag=Mag,
aratio=rupture_aratio,
strike=Strike,
dip=Dip,
rake=Rake,
)
# site for which we compute (receiver location)
site_collection = SiteCollection([Site(location=Point(rx_lon, rx_lat, 0))])
imtls = {s: [0] for s in [imt]} #intensity measure levels dict required by the context maker (a single IMT here)
context_maker = ContextMaker('Induced', gmpes, {'imtls': imtls, 'mags': [Mag]}) #necessary contexts builder
src = CharacteristicFaultSource(source_id = 1,
name = 'rup',
tectonic_region_type = 'Induced',
mfd = ArbitraryMFD([Mag], [0.01]), #this does not have any effect
temporal_occurrence_model = PoissonTOM(50.), #this is also not really used
surface = planar_surface,
rake = Rake)
ctx = context_maker.from_srcs([src], site_collection)[0] #returns one context from the source for one rupture
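# Both definitions of exceedance_root_function below are zero at the ground
# motion level whose exceedance probability equals the target p, which is what
# the root finder at the end of this function searches for.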
if use_cython:
from cython_exceedance import exceedance_core
def exceedance_root_function(a):
return exceedance_core(a, r, fr, lambdas, D, percentages_D, magnitudes,
magnitude_pdf, magnitude_cdf, context_maker, ctx,
model, num_ground_motion_records) - p
else:
def exceedance_root_function(a):
exceedance_prob_sum = 0
log_a = np.log(a) # Precompute log(a)
for j in range(len(lambdas)): #loop through all lambdas
lambda_j = lambdas[j]
D_j_val = percentages_D[j] * D # Use a different name to avoid shadowing D
lambda_D_j = lambda_j * D_j_val
denom_j = (1 - np.exp(-lambda_D_j))
if denom_j == 0: # Avoid division by zero if lambda_D_j is very small or zero
continue
for i in range(n_events): #loop through all events
ri = r[i] # Epicentral distance
fr_i = fr[i] # Location probability f(r)
ctx.repi = ri
# Precompute terms only dependent on lambda_j, D_j, m
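# These terms assemble, for every magnitude bin, what appears to be the PDF of
# the largest event of a Poisson process with rate lambda_j over duration D_j,
# conditioned on at least one event occurring:
#   f_max(m) = lambda*D * f(m) * exp(-lambda*D * (1 - F(m))) / (1 - exp(-lambda*D))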
lambda_D_j_f_m = lambda_D_j * magnitude_pdf
exp_term_m = np.exp(-lambda_D_j * (1 - magnitude_cdf))
f_conditional_base_m = (lambda_D_j_f_m * exp_term_m) / denom_j
for k in range(len(magnitudes)): #loop through all values of magnitude pdf and cdf
m = magnitudes[k]
ctx.mag = m # update context magnitude
# Calculate f_conditional (simpler now)
f_conditional = f_conditional_base_m[k]
mean, sig, _, _ = context_maker.get_mean_stds(ctx)
log_gm_predicted = mean[0][0][0]
variance_term = sig[0][0][0]
residual = log_a - log_gm_predicted # Use precomputed log_a
if residual <= 0:
exceedance_probability = 1.0
else:
# Avoid division by zero or very small numbers if variance_term is ~0
if variance_term < 1e-15: # Adjust threshold as needed
exceedance_probability = 0.0
else:
t_value = residual / variance_term
if model == 'Lasocki2013':
exceedance_probability = t.sf(t_value, num_ground_motion_records - 3) # student t distribution, degrees of freedom: n-3; sf = 1 - cdf
else:
exceedance_probability = norm.sf(t_value) # equivalent to 1.0 - norm.cdf(t_value)
location_exceedance_prob = exceedance_probability * f_conditional * fr_i
exceedance_prob_sum += location_exceedance_prob
return exceedance_prob_sum - p
# Check function values at different test points
IMT_mid = (IMT_min + IMT_max) / 2
lower_bound_value = exceedance_root_function(IMT_min)
mid_point_value = exceedance_root_function(IMT_mid)
upper_bound_value = exceedance_root_function(IMT_max)
logger.info(f"Receiver: {str(rx_label)}")
logger.info(f"Function value at {imt} = {str(IMT_min)} : {lower_bound_value}")
logger.info(f"Function value at {imt} = {str(IMT_mid)} : {mid_point_value}")
logger.info(f"Function value at {imt} = {str(IMT_max)} : {upper_bound_value}")
if np.sign(lower_bound_value) == np.sign(upper_bound_value):
msg = "Function values at the interval endpoints must differ in sign for fsolve to work. Expand the interval or use a different model."
logger.error(msg)
gm_est = np.nan
return gm_est
# raise ValueError(msg)
# Find root of function
start = timer()
try:
method='brenth'
logger.debug("Now trying Scipy " + method)
output = root_scalar(exceedance_root_function, bracket=[IMT_min, IMT_max], rtol=rtol, method=method)
#output = root_scalar(exceedance_root_function, method=method)
gm_est = output.root
except Exception as error:
logger.error(f"An exception occurred: {error}")
logger.info("Set ground motion value to nan")
gm_est = np.nan
end = timer()
logger.info(f"Ground motion estimation computation time: {round(end - start,1)} seconds")
logger.info(f"Estimated {imt}: {gm_est}")
return gm_est
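# Illustrative single-receiver call (kept as a comment; all inputs below are
# hypothetical stand-ins with the shapes the signature above expects):
#   m_range = np.linspace(0.5, 4.0, 36)                # magnitude grid
#   m_pdf = np.exp(-m_range) / np.trapz(np.exp(-m_range), m_range)
#   m_cdf = np.cumsum(m_pdf); m_cdf = m_cdf / m_cdf[-1]
#   gm = compute_IMT_exceedance(
#       50.0, 19.0,                                    # receiver lat, lon
#       np.array([2.0, 5.0, 9.0]),                     # r: epicentral distances
#       np.array([0.5, 0.3, 0.2]),                     # fr: location probabilities
#       0.05,                                          # p: target exceedance probability
#       np.array([0.1]), 30.0, np.array([1.0]),        # lambdas, D, percentages_D
#       m_range, m_pdf, m_cdf, 'Lasocki2013', imt='PGA')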
def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max,
m_kde_method, xy_select, grid_dim, xy_win_method, rate_select, time_win_duration,
@@ -260,7 +60,7 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
from igfash.io import read_mat_cat, read_mat_m, read_mat_mc, read_mat_pdf, read_csv
from igfash.window import win_CTL, win_CNE
import igfash.kde as kde
#from igfash.gm import compute_IMT_exceedance
from igfash.gm import compute_IMT_exceedance
from igfash.compute import get_cdf, hellinger_dist, cols_to_rows
from igfash.rate import lambda_probs, calc_bins, bootstrap_forecast_rolling
from igfash.mc import estimate_mc
@@ -288,9 +88,7 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
else:
logger.setLevel(logging.INFO)
# temporary hard-coded configuration
# exclude_low_fxy = False
exclude_low_fxy = True
exclude_low_fxy = True # skip low probability areas of the map
thresh_fxy = 1e-3 # minimum fxy value (location PDF) needed to do PGA estimation (to skip low probability areas); also should scale according to number of grid points
# log user selections
@@ -310,7 +108,6 @@ verbose: {verbose}")
logger.debug(f"Igfash version {version('igfash')}")
logger.debug(f"Rbeast version {version('rbeast')}")
logger.debug(f"Dask version {version('dask')}")
logger.debug(f"Logging version {logging.__version__}")
dask.config.set(scheduler='processes')
@@ -326,10 +123,6 @@ verbose: {verbose}")
logger.info("No magnitude label of catalog specified, therefore try Mw by default")
mag_label = 'Mw'
# if cat_label == None:
# print("No magnitude label of catalog specified, therefore try 'Catalog' by default")
# cat_label='Catalog'
time, mag, lat, lon, depth = read_mat_cat(catalog_file, mag_label=mag_label, catalog_label='Catalog')
# check for null magnitude values
@@ -459,7 +252,7 @@ verbose: {verbose}")
# %% compute KDE and extract PDF
start = timer()
if xy_win_method == "TW":
if xy_win_method:
logger.info("Time weighting function selected")
x_weights = np.linspace(0, 15, len(t_windowed))
@@ -520,7 +313,7 @@ verbose: {verbose}")
# run activity rate modeling
lambdas = [None]
if custom_rate != None and forecast_select:
if custom_rate != None and forecast_select and not rate_select:
logger.info(f"Using activity rate specified by user: {custom_rate} per {time_unit}")
lambdas = np.array([custom_rate], dtype='d')
lambdas_perc = np.array([1], dtype='d')
@@ -655,36 +448,43 @@ verbose: {verbose}")
start = timer()
use_pp = True
use_pp = False
if use_pp: # use dask parallel computing
pbar = ProgressBar()
pbar.register()
# iter = range(0,len(distances))
iter = indices
iml_grid_raw = [] # raw ground motion grids
for imt in products:
logger.info(f"Estimating {imt}")
imls = [dask.delayed(compute_IMT_exceedance)(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas, forecast_len,
lambdas_perc, m_range, m_pdf, m_cdf, model, imt=imt, IMT_min=0.0,
IMT_max=2.0, rx_label=i, rtol=0.1, use_cython=False) for i in iter[:2]]
#imls = [dask.delayed(my_trivial_task)(i) for i in iter]
if imt == "PGV":
IMT_max = 200 # search interval max for velocity (cm/s)
else:
IMT_max = 2.0 # search interval max for acceleration (g)
imls = [dask.delayed(compute_IMT_exceedance)(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas,
forecast_len, lambdas_perc, m_range, m_pdf, m_cdf, model,
log_level=logging.DEBUG, imt=imt, IMT_min=0.0, IMT_max=IMT_max, rx_label=i,
rtol=0.1, use_cython=True) for i in iter]
iml = dask.compute(*imls)
iml_grid_raw.append(list(iml))
else:
iml_grid_raw = []
iter = indices
for imt in products:
if imt == "PGV":
IMT_max = 200 # search interval max for velocity (cm/s)
else:
IMT_max = 2.0 # search interval max for acceleration (g)
iml = []
for i in iter:
iml_i = compute_IMT_exceedance(rx_lat[i], rx_lon[i], distances[i].flatten(), fr, p, lambdas, forecast_len,
lambdas_perc, m_range, m_pdf, m_cdf, model, imt=imt, IMT_min = 0.0,
IMT_max = 2.0, rx_label = i, rtol = 0.1, use_cython=False)
IMT_max = IMT_max, rx_label = i, rtol = 0.1, use_cython=True)
iml.append(iml_i)
logger.info(f"Estimated {imt} at rx {i} is {iml_i}")
iml_grid_raw.append(iml)
@@ -692,15 +492,11 @@ verbose: {verbose}")
end = timer()
logger.info(f"Ground motion exceedance computation time: {round(end - start, 1)} seconds")
logger.info("Test3 run finished")
if np.isnan(iml_grid_raw).all():
msg = "No valid ground motion intensity measures were forecasted. Try a different ground motion model."
logger.error(msg)
raise Exception(msg)
# create list of one empty list for each imt
iml_grid = [[] for _ in range(len(products))] # final ground motion grids
iml_grid_prep = iml_grid.copy() # temp ground motion grids
@@ -723,17 +519,20 @@ verbose: {verbose}")
dtype=np.float64) # this reduces values to 8 decimal places
iml_grid_tmp = np.nan_to_num(iml_grid[j]) # change nans to zeroes
# upscale the grid
# upscale the grid, trim, and interpolate if there are at least 10 grid values with range greater than 0.1
if np.count_nonzero(iml_grid_tmp) >= 10 and vmax-vmin > 0.1:
up_factor = 4
iml_grid_hd = resize(iml_grid_tmp, (up_factor * len(iml_grid_tmp), up_factor * len(iml_grid_tmp)),
mode='reflect', anti_aliasing=False)
iml_grid_hd[iml_grid_hd == 0.0] = np.nan # change zeroes back to nan
# trim edges so the grid is not so blocky
vmin_hd = min(x for x in iml_grid_hd.flatten() if not isnan(x))
vmax_hd = max(x for x in iml_grid_hd.flatten() if not isnan(x))
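# Everything below the coarse-grid minimum is treated as an upscaling artefact:
# resize() interpolates between real values and the zero-filled cells, leaving a
# low halo along the edge of the valid area that this mask removes.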
trim_thresh = vmin
iml_grid_hd[iml_grid_hd < trim_thresh] = np.nan
else:
iml_grid_hd = iml_grid_tmp
iml_grid_hd[iml_grid_hd == 0.0] = np.nan # change zeroes back to nan
#vmin_hd = min(x for x in iml_grid_hd.flatten() if not isnan(x))
vmax_hd = max(x for x in iml_grid_hd.flatten() if not isnan(x))
# generate image overlay
north, south = lat.max(), lat.min() # Latitude range
@@ -746,7 +545,7 @@ verbose: {verbose}")
cmap_name = 'YlOrRd'
cmap = plt.get_cmap(cmap_name)
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(iml_grid_hd, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
ax.imshow(iml_grid_hd, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax, interpolation='bilinear')
ax.axis('off')
# Save the figure
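Taken together, the upscaling, trimming, and smoothing steps changed above form a short pipeline. The following standalone sketch exercises it with a random stand-in grid; the 12x12 size, the 0.05 zero-cutoff, and the output filename are illustrative assumptions, not values from the pipeline:

import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize

rng = np.random.default_rng(0)
grid = rng.uniform(0.0, 0.5, size=(12, 12))  # stand-in coarse ground motion grid
grid[grid < 0.05] = 0.0                      # zeros mark cells with no estimate

vmin = grid[grid > 0].min()
vmax = grid.max()
# upscale, trim, and interpolate only if there are enough values with enough spread
if np.count_nonzero(grid) >= 10 and vmax - vmin > 0.1:
    up_factor = 4
    grid_hd = resize(grid, (up_factor * grid.shape[0], up_factor * grid.shape[1]),
                     mode='reflect', anti_aliasing=False)
    grid_hd[grid_hd == 0.0] = np.nan         # change zeros back to NaN
    grid_hd[grid_hd < vmin] = np.nan         # trim the low halo left by upscaling
else:
    grid_hd = grid.copy()
    grid_hd[grid_hd == 0.0] = np.nan

fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(grid_hd, origin='lower', cmap='YlOrRd', vmin=vmin, vmax=vmax,
          interpolation='bilinear')          # bilinear smoothing on the final grid
ax.axis('off')
fig.savefig('iml_overlay.png', bbox_inches='tight', pad_inches=0)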

View File

@@ -58,7 +58,6 @@ def main(argv):
parser.add_argument("--verbose", type=str2bool)
args = parser.parse_args()
print(args)
shf(**vars(args))
return
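The --verbose option is parsed with a str2bool converter that this diff does not show; a common minimal implementation (an assumption about the helper, not the project's actual code) looks like:

import argparse

def str2bool(v):
    # Accept booleans passed through as well as common truthy/falsy strings
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError(f"Boolean value expected, got {v!r}")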