Compare commits

...

6 Commits

@@ -45,20 +45,16 @@ def main(catalog_file, mc_file, pdf_file, m_file, m_select, mag_label, mc, m_max
     """
     import sys
+    from importlib.metadata import version
     import logging
     from base_logger import getDefaultLogger
     from timeit import default_timer as timer
     from math import ceil, floor, isnan
     import numpy as np
-    import scipy
-    import obspy
-    import dask
     from dask.diagnostics import ProgressBar # use Dask progress bar
     import kalepy as kale
     import utm
     from skimage.transform import resize
-    import psutil
-    import openquake.engine
     import igfash
     from igfash.io import read_mat_cat, read_mat_m, read_mat_pdf, read_csv
     from igfash.window import win_CTL, win_CNE
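Note: the switch to importlib.metadata lets the version logging further down work without importing heavyweight packages such as scipy, obspy, and openquake.engine, since version strings are read from installed package metadata rather than from module attributes. A minimal sketch of the pattern (the try/except handling is an illustration, not part of this change):

    from importlib.metadata import version, PackageNotFoundError

    try:
        # reads the installed distribution's metadata; the package itself is never imported
        print(version("scipy"))
    except PackageNotFoundError:
        print("scipy is not installed")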
@@ -105,28 +101,24 @@ verbose: {verbose}")
     # print key package version numbers
     logger.debug(f"Python version {sys.version}")
-    logger.debug(f"Numpy version {np.__version__}")
-    logger.debug(f"Scipy version {scipy.__version__}")
-    logger.debug(f"Obspy version {obspy.__version__}")
-    logger.debug(f"Openquake version {openquake.engine.__version__}")
-    logger.debug(f"Igfash version {igfash.__version__}")
+    logger.debug(f"Numpy version {version('numpy')}")
+    logger.debug(f"Scipy version {version('scipy')}")
+    logger.debug(f"Obspy version {version('obspy')}")
+    logger.debug(f"Openquake version {version('openquake.engine')}")
+    logger.debug(f"Igfash version {version('igfash')}")
+    logger.debug(f"Rbeast version {version('rbeast')}")
+    logger.debug(f"Dask version {version('dask')}")

-    # print number of cpu cores available
-    ncpu = psutil.cpu_count(logical=False)
-    logger.debug(f"Number of cpu cores available: {ncpu}")
-    for process in psutil.process_iter():
-        with process.oneshot():
-            # cpu = process.cpu_percent()
-            cpu = process.cpu_percent() / ncpu
-            if cpu > 1:
-                logger.debug(f"{process.name()}, {cpu}")
-    logger.debug(f"BASELINE CPU LOAD% {psutil.cpu_percent(interval=None, percpu=True)}")

     dask.config.set(scheduler='processes')

+    # validate m_max against the ground motion model if forecasting was selected
+    models_anthro_limited = ['Lasocki2013', 'Atkinson2015', 'ConvertitoEtAl2012Geysers'] # these models require that m_max<=4.5
+    if forecast_select:
+        if m_max > 4.5 and model in models_anthro_limited:
+            msg = "Selected ground motion model is only valid up to a maximum magnitude of 4.5. Please try again with a lower maximum magnitude."
+            logger.error(msg)
+            raise Exception(msg)

     # run magnitude distribution modeling if selected by user and no magnitude pdf file provided
     if m_select and m_range[0] == None and m_pdf[0] == None:
         logger.info("Magnitude distribution modeling selected")
@@ -166,9 +158,9 @@ verbose: {verbose}")
     lat = np.delete(lat, indices)
     lon = np.delete(lon, indices)

-    # if user does not provide a m_max, set m_max to 3 magnitude units above max magnitude in catalog
+    # if user does not provide a m_max, set m_max to 1 magnitude unit above max magnitude in catalog
     if m_max == None:
-        m_max = mag.max() + 3.0
+        m_max = mag.max() + 1.0

     start = timer()
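Note: with the new fallback, a catalog whose largest observed event is, say, M 2.8 now defaults to m_max = 3.8 instead of 5.8, a much tighter default ceiling for the magnitude distribution.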
@@ -365,7 +357,7 @@ verbose: {verbose}")
     act_rate, bin_counts, bin_edges, out, pprs, rt, idx, u_e = calc_bins(np.array(datenum_data), time_unit,
                                                                          time_win_duration, dates_calc,
                                                                          rate_forecast, rate_unc_high, rate_unc_low,
-                                                                         multiplicator, quiet=True)
+                                                                         multiplicator, quiet=True, figsize=(14,9))

     # Assign probabilities
     lambdas, lambdas_perc = lambda_probs(act_rate, dates_calc, bin_edges)
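Note: figsize=(14,9) follows matplotlib's (width, height) in inches convention, so this presumably just enlarges the diagnostic plot that calc_bins renders; the rate computation itself is unchanged.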
@@ -426,7 +418,10 @@ verbose: {verbose}")
         rx_lat[i], rx_lon[i] = utm.to_latlon(x_rx[i], y_rx[i], utm_zone_number,
                                              utm_zone_letter) # get receiver location as lat,lon

-    # experimental - compute ground motion only at grid points that have minimum probability density of thresh_fxy
+    # convert distances from m to km because openquake ground motion models take input distances in kilometres
+    distances = distances/1000.0
+
+    # compute ground motion only at grid points that have minimum probability density of thresh_fxy
     if exclude_low_fxy:
         indices = list(np.where(fxy.flatten() > thresh_fxy)[0])
     else:
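Note: OpenQuake ground motion models define their distance metrics (epicentral, hypocentral, Joyner-Boore) in kilometres, while the UTM grid here is in metres, so passing raw UTM distances would make every site look 1000x farther away than it is. A minimal sketch of the pitfall, with hypothetical coordinates:

    import numpy as np

    # hypothetical source and receiver positions in UTM metres
    x_src, y_src = 500_000.0, 4_000_000.0
    x_rx = np.array([501_000.0, 505_000.0])
    y_rx = np.array([4_002_000.0, 4_003_000.0])

    distances_m = np.hypot(x_rx - x_src, y_rx - y_src)  # metres
    distances_km = distances_m / 1000.0                 # what a GMPE expects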
@@ -475,6 +470,11 @@ verbose: {verbose}")
     end = timer()
     logger.info(f"Ground motion exceedance computation time: {round(end - start, 1)} seconds")

+    if np.isnan(iml_grid_raw).all():
+        msg = "No valid ground motion intensity measures were forecasted. Try a different ground motion model."
+        logger.error(msg)
+        raise Exception(msg)
+
     # create list of one empty list for each imt
     iml_grid = [[] for _ in range(len(products))] # final ground motion grids
     iml_grid_prep = iml_grid.copy() # temp ground motion grids
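Note: the new guard fails fast when the entire intensity grid is NaN, as can happen when every grid point falls outside the chosen model's validity range. A toy illustration of the condition it checks (the array contents are made up):

    import numpy as np

    iml_grid_raw = np.full((3, 3), np.nan)  # e.g. every grid point failed
    assert np.isnan(iml_grid_raw).all()     # this is what triggers the new raise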