Removed unnecessary matlab snippets
parent 064a0f75cb
commit c81a456449
@ -1,40 +0,0 @@
% function [Catalog] = CatalogFilterNaN(fullcatalog,...)
% Select lines/values from catalog without NaN and empty string values
% Input parameters are names of fields
% eg. CatalogFilterNaN(fullcatalog,'Mw','EID')
%
% (c) Dorota Olszewska IG PAS

function [Catalog] = CatalogFilterNaN(fullcatalog,varargin)
Catalog = fullcatalog ;
range = true(size(fullcatalog(1).val)) ;
na_varargin = numel(varargin) ;
for i = 1:na_varargin
    if ~ischar(varargin{i})
        warning(['Wrong type of parameter ' num2str(i)])
        continue
    end
    index = find(strcmp(varargin{i},{fullcatalog.field})) ;
    if length(index) ~= 1
        warning(['Cannot select field ' varargin{i}])
        continue
    end
    if fullcatalog(index).type == 3
        for ii = 1:length(Catalog(index).val)
            lrange(ii,1) = ~isempty(Catalog(index).val{ii});
        end
    else
        lrange = ~isnan(fullcatalog(index).val) ;
        %nnel = numel(lrange) ;
    end
    range = range & lrange;
end
for i = 1:numel(fullcatalog)
    if fullcatalog(i).type == 3
        Catalog(i).val = {fullcatalog(i).val{range}}' ;
    else
        Catalog(i).val = fullcatalog(i).val(range) ;
    end
end
end
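A minimal usage sketch of the removed filter, assuming a hand-built two-column catalog; the field names 'Mw' and 'EID' and the sample values below are illustrative, not taken from the repository:

% Hypothetical two-column catalog: a numeric column (type 1) and a text column (type 3).
fullcatalog(1) = struct('field','Mw','type',1,'val',[1.2; NaN; 2.0],'unit','','description','Magnitude','fieldType','Magnitude');
fullcatalog(2) = struct('field','EID','type',3,'val',{{'ev1';'';'ev3'}},'unit','','description','Event ID','fieldType','');
% Keep only rows where 'Mw' is not NaN and 'EID' is not an empty string (rows 1 and 3).
Catalog = CatalogFilterNaN(fullcatalog,'Mw','EID');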
@ -1,55 +0,0 @@
% function [Catalog] = CatalogSelectFields(fullcatalog,...)
% Select fields from IS-POIG catalog v2.0
% Produce reduced IS-POIG catalog v2.0
% Input parameters are names of fields
% eg. CatalogSelectFields(fullcatalog,'Date','Lat','Long','Mw')
% or cells with names of fields
% eg. CatalogSelectFields(fullcatalog,{'Date','Lat','Long','Mw'})
%
% (c) Jan Wiszniowski IG PAS
% v_2.0 19-09-2016 Dorota Olszewska
% 2021-10-22: test of inputs and error messages.
%
function [Catalog] = CatalogSelectFields(fullcatalog,varargin)
na_varargin = length(varargin) ;
no_cols = 0 ;
for i = 1:na_varargin
    if iscell(varargin{i})
        cellin = varargin{i} ;
        for j = 1:numel(cellin)
            if ischar(cellin{j})
                if ~any(strcmp(cellin{j},{fullcatalog.field}))
                    error('Field %s does not exist in the catalog', cellin{j})
                end
                no_cols = no_cols+1 ;
            else
                error('Wrong %d{%d} input argument of the CatalogSelectFields function', j, i)
            end
        end
    elseif ischar(varargin{i})
        if ~any(strcmp(varargin{i},{fullcatalog.field}))
            error('Field %s does not exist in the catalog', varargin{i})
        end
        no_cols = no_cols+1 ;
    else
        error('Wrong %d input of the CatalogSelectFields function', i)
    end
end
Catalog = struct('field',cell(1,no_cols),'type',cell(1,no_cols),'val',cell(1,no_cols),'unit',cell(1,no_cols),'description',cell(1,no_cols),'fieldType',cell(1,no_cols)) ;
no_cols = 0 ;
for i = 1:na_varargin
    if iscell(varargin{i})
        cellin = varargin{i} ;
        for j = 1:numel(cellin)
            if ischar(cellin{j})
                no_cols = no_cols+1 ;
                Catalog(no_cols) = fullcatalog(strcmp(cellin{j},{fullcatalog.field})) ;
            end
        end
    elseif ischar(varargin{i})
        no_cols = no_cols+1 ;
        Catalog(no_cols) = fullcatalog(strcmp(varargin{i},{fullcatalog.field})) ;
    end
end
end
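A short usage sketch based on the examples in the header above, assuming 'fullcatalog' is an IS-POIG v2.0 catalog that contains the listed fields:

% Keep four columns; the names can be passed separately or as a single cell array.
reduced1 = CatalogSelectFields(fullcatalog, 'Date', 'Lat', 'Long', 'Mw');
reduced2 = CatalogSelectFields(fullcatalog, {'Date', 'Lat', 'Long', 'Mw'});
% Both calls return the same reduced catalog; an unknown field name raises an error.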
@ -1,83 +0,0 @@
% function [Catalog] = CatalogSelectRange(fullcatalog,...)
% Select lines/values from catalog
% Input parameters are pairs:
% names of fields
% and
% range of values
% for numeric there are: [minval maxval] or [val] or {val1, val2, val3, ...}
% for strings there are: 'valtext' or {'valtext1', 'valtext2', 'valtext3', ...}
% eg. CatalogSelectRange(fullcatalog,'Mw',[0 4.5])
%
% (c) Jan Wiszniowski IG PAS
% Modified 2016.02.04 - numerical comparison with tolerance 1e-14

function [Catalog] = CatalogSelectRange(fullcatalog,varargin)
Catalog = fullcatalog ;
range = true(size(fullcatalog(1).val)) ;
na_varargin = numel(varargin) ;
for i = 1:2:na_varargin-1
    if ~ischar(varargin{i})
        warning(['Wrong type of parameter ' num2str(i)])
        continue
    end
    index = find(strcmp(varargin{i},{fullcatalog.field})) ;
    if length(index) ~= 1
        warning(['Cannot select field ' varargin{i}])
        continue
    end
    if fullcatalog(index).type == 3
        lrange = varargin{i+1} ;
        if ischar(lrange)
            range = range & strcmp(lrange,fullcatalog(index).val) ;
        elseif iscell(lrange)
            l_range = false(size(range)) ;
            for j = 1:numel(lrange)
                llrange = lrange{j} ;
                if ischar(llrange)
                    l_range = l_range | strcmp(llrange,fullcatalog(index).val) ;
                end
            end
            range = range & l_range ;
        else
            warning(['Wrong type of values for parameter ' varargin{i}])
        end
    else
        lrange = varargin{i+1} ;
        nnel = numel(lrange) ;
        if isnumeric(lrange)
            tol = abs(lrange) ./ 1e15 ;
            if nnel == 2
                range = range & fullcatalog(index).val >= lrange(1)-tol(1) & fullcatalog(index).val <= lrange(2)+tol(2) ;
            elseif nnel == 1
                range = range & abs(fullcatalog(index).val - lrange(1)) <= tol(1) ;
            end
        elseif iscell(lrange)
            l_range = false(size(range)) ;
            for j = 1:numel(lrange)
                llrange = lrange{j} ;
                tol = abs(llrange) ./ 1e15 ;
                if isnumeric(llrange)
                    if numel(llrange) == 2
                        l_range = l_range | fullcatalog(index).val >= llrange(1)-tol(1) & fullcatalog(index).val <= llrange(2)+tol(2) ;
                    elseif numel(llrange) == 1
                        l_range = l_range | abs(fullcatalog(index).val - llrange(1)) <= tol(1);
                    end
                else
                    warning(['Wrong type of values for parameter ' varargin{i}])
                end
            end
            range = range & l_range ;
        else
            warning(['Wrong type of values for parameter ' varargin{i}])
        end
    end
end
for i = 1:numel(fullcatalog)
    if fullcatalog(i).type == 3
        Catalog(i).val = {fullcatalog(i).val{range}}' ;
    else
        Catalog(i).val = fullcatalog(i).val(range) ;
    end
end
end
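A usage sketch combining the documented numeric and text filters, assuming the catalog has 'Mw' and 'EID' columns (the event IDs are hypothetical):

% Rows with 0 <= Mw <= 4.5 (numeric [min max] range).
selected = CatalogSelectRange(fullcatalog, 'Mw', [0 4.5]);
% The same range, additionally restricted to two event IDs given as a cell of strings.
selected = CatalogSelectRange(fullcatalog, 'Mw', [0 4.5], 'EID', {'ev1', 'ev3'});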
@ -1,100 +0,0 @@
% function [Catalog] = merge2Catalog(catalog1,catalog2)
% Add values from 1st Catalog to 2nd
%
% v1.1 - 2021119 - sorting by time was added
% (c) Dorota Olszewska IG PAS

function [Catalog] = merge2Catalog(Catalog,catalog2,ID)

if sum(strcmp(ID,{Catalog.field}))+sum(strcmp(ID,{catalog2.field})) == 0
    error([ID,' is not a field of both catalogs ', ID])
end

ID1 = Catalog(strcmp(ID,{Catalog.field})).val;
ID2 = catalog2(strcmp(ID,{catalog2.field})).val;

if length(unique(ID1))~=length(ID1) || length(unique(ID2))~=length(ID2)
    error([ID,' does not have unique values in both catalogs'])
end

no_cols = size(Catalog,2);

for j = 1:numel(ID2)
    llrange = ID2{j} ;
    l_range(j) = sum(strcmp(llrange,ID1)) ;
end

catalog2_row = CatalogSelectRange(catalog2,ID,catalog2(strcmp(ID,{catalog2.field})).val(l_range==0));

% add rows by RID
no_rows_row = size(catalog2_row(1).val,1);
if no_rows_row == 0
else
    for i = 1:no_cols
        if Catalog(i).type == 3
            if sum(strcmp(Catalog(i).field,{catalog2_row.field})) == 0
                Catalog(i).val = [Catalog(i).val;repmat({'NaN'},no_rows_row,1)];
            else
                Catalog(i).val = [Catalog(i).val;catalog2_row(strcmp(Catalog(i).field,{catalog2_row.field})).val];
            end
        else
            if sum(strcmp(Catalog(i).field,{catalog2_row.field})) == 0
                Catalog(i).val = [Catalog(i).val;NaN(no_rows_row,1)];
            else
                Catalog(i).val = [Catalog(i).val;catalog2_row(strcmp(Catalog(i).field,{catalog2_row.field})).val];
            end
        end
    end
end

ID1 = Catalog(strcmp(ID,{Catalog.field})).val;
ID2 = catalog2(strcmp(ID,{catalog2.field})).val;

no_cols = size(Catalog,2);
no_rows = size(Catalog(1).val,1);

for j = 1:numel(ID2)
    llrange = ID2{j} ;
    l_range(j) = sum(strcmp(llrange,ID1)) ;
end

catalog2_col = CatalogSelectRange(catalog2,ID,catalog2(strcmp(ID,{catalog2.field})).val(l_range==1));

% add columns by RID
no_rows_col = size(catalog2_col(1).val,1);
if no_rows_col == 0
else
    column1 = {Catalog.field};
    column2 = {catalog2_col.field};

    for j = 1:numel(column2)
        llrange2 = column2{j} ;
        l_range2(j) = sum(strcmp(llrange2,column1)) ;
    end

    new_column = column2(l_range2==0);

    for k = 1:numel(new_column)
        Catalog(no_cols+k).field = catalog2_col(strcmp(new_column{k},{catalog2_col.field})).field;
        Catalog(no_cols+k).type = catalog2_col(strcmp(new_column{k},{catalog2_col.field})).type;
        Catalog(no_cols+k).unit = catalog2_col(strcmp(new_column{k},{catalog2_col.field})).unit;
        Catalog(no_cols+k).description = catalog2_col(strcmp(new_column{k},{catalog2_col.field})).description;
        Catalog(no_cols+k).fieldType = catalog2_col(strcmp(new_column{k},{catalog2_col.field})).fieldType;
        for kk = 1:no_rows
            rr = strcmp(Catalog(strcmp(ID,{Catalog.field})).val(kk),catalog2_col(strcmp(ID,{catalog2_col.field})).val);
            if sum(rr) == 0
                Catalog(no_cols+k).val(kk,1) = NaN;
            else
                Catalog(no_cols+k).val(kk,1) = catalog2_col(strcmp(new_column{k},{catalog2_col.field})).val(rr);
            end
        end
    end

    Catalog = sortByTime(Catalog);
end
@ -1,90 +0,0 @@
%
% -----------------
% Copyright © 2022 ACK Cyfronet AGH, Poland.
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
% This work was partially funded by EPOS-SP Project.
% -----------------
%
% Modifies event with the given 'eventId' in the catalog. If such event was not yet present in the catalog,
% it is added at the correct position with respect to time (the event is added at the end and the resulting
% catalog is sorted by time)
%
function outCatalog = modifyCatalog(catalog, eventId, eventValues)
    newColIdxs = findAbsentFields({eventValues.field}, {catalog.field});
    if ~isempty(newColIdxs)
        % add columns that were not yet present in the catalog, but are specified in eventValues
        outCatalog = insertColumns(catalog, eventValues, newColIdxs);
    else
        outCatalog = catalog;
    end
    idColumn = outCatalog(1).val; % assuming ID is always the first column
    eventIdx = find(strcmp(idColumn, eventId));
    isNewEvent = 0;
    if isempty(eventIdx)
        isNewEvent = 1;
        eventIdx = length(idColumn) + 1;
        outCatalog(1).val(eventIdx) = eventId;
    end
    for e=1:length(eventValues)
        colIdx = findCatalogColumn(outCatalog, eventValues(e).field);
        outCatalog(colIdx).val(eventIdx) = eventValues(e).value;
    end
    if isNewEvent
        % fill fields that were not specified in eventValues with empty values
        absentColIdxs = findAbsentFields({outCatalog.field}, {eventValues.field});
        if ~isempty(absentColIdxs)
            for i=2:length(absentColIdxs) % skip 1 (ID column index)
                absColIdx = absentColIdxs(i);
                outCatalog(absColIdx).val(eventIdx) = getEmptyFieldValue(outCatalog(absColIdx).type, 1);
            end
        end
    end
    outCatalog = sortByTime(outCatalog);
end

% find Catalog columns that were not used in eventValues
function colIdxs = findAbsentFields(catalogFields, eventValuesFields)
    colIdxs = [];
    for c=1:length(catalogFields)
        if ~ismember(catalogFields(c), eventValuesFields)
            colIdxs(end + 1) = c;
        end
    end
end

% get value that should be used as empty field for the given column type
function empty = getEmptyFieldValue(type, fieldsNo)
    if (type == 3)
        empty = cell(fieldsNo, 1);
    else
        empty = nan(fieldsNo, 1);
    end
end

function outCatalog = insertColumns(catalog, eventValues, newColIdxs)
    outCatalog = catalog;
    rowsNo = length(catalog(1).val);
    colsNo = length(catalog);
    for i=1:length(newColIdxs)
        newColIdx = colsNo+i;
        eventFields = eventValues(newColIdxs(i));
        outCatalog(newColIdx).field = eventFields.field;
        outCatalog(newColIdx).type = eventFields.type;
        outCatalog(newColIdx).unit = eventFields.unit;
        outCatalog(newColIdx).description = eventFields.description;
        outCatalog(newColIdx).fieldType = eventFields.fieldType;
        outCatalog(newColIdx).val = getEmptyFieldValue(eventFields.type, rowsNo);
    end
end
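A hedged usage sketch for updating an existing event, assuming the catalog's first column is the ID column and that findCatalogColumn and sortByTime (not shown in this commit) are on the path; the event ID and values are illustrative:

% Overwrite the 'Mw' value of event 'ev42', which is assumed to already be in the catalog.
eventValues = struct('field', {'Mw'}, 'value', {3.1}, 'type', {12}, ...
    'unit', {''}, 'description', {'Moment magnitude'}, 'fieldType', {'Magnitude'});
catalog = modifyCatalog(catalog, 'ev42', eventValues);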
@ -1 +0,0 @@
filterByMminAndTime
@ -1,110 +0,0 @@
function [Catalog] = csv2catalog(csvFilePath, column_desc, idPrefix, sortCatalogByTime, useCsvHeader)
% DESCRIPTION: Program to create the catalogue v2.0x in the
%   Matlab format file from csv file.
% INPUTS:
% - csvFilePath : path to text file with csv data. The separator between columns is defined by ','.
%   Description of the most popular catalog fields can be found at https://docs.cyfronet.pl/display/ISDOC/Catalog+-+description
% - column_desc : structure containing information how the catalog should be created
%   - include: whether the field should be included in the resulting catalog
%   - nameInCsv: name of the column in CSV file
%   - inputType: type of the column in CSV file (REAL/INTEGER/TEXT/DATE_TIME/DATE/DATE_DAY/DATE_MONTH/DATE_YEAR/TIME)
%   - inputTimeFormat: input format for reading time (if 'inputType' is one of time options)
%   - inputTimeGroup: input group for merging dates
%   - nameInCatalog: name of the column that should be inserted into the resulting catalog
%   - description: description of the column in the resulting catalog
%   - format: format (display format) of the column in the resulting catalog
%   - unit: unit of the column in the resulting catalog
%   - fieldType: type of the column in the resulting catalog (e.g. Magnitude, Energy)
% - idPrefix : prefix of the ID column if the IDs should be generated (if the catalog doesn't contain
%   the id or the column is not included ('include' is set to false))

%TODO handle multiple time columns
%TODO the script fails if any of the rows has an empty value at the end (but empty quotes is ok)

    data = readAndCheckHeaders(csvFilePath, column_desc, useCsvHeader);
    colCount = length(column_desc);
    noCsvHeaderModifier = 1;
    if useCsvHeader
        noCsvHeaderModifier = 0;
    end
    rowCount = length(data) / colCount - 1 + noCsvHeaderModifier;
    k = 1; % column number in the generated catalog

    if ~contains_id(column_desc)
        if isempty(idPrefix)
            [~, idPrefix] = fileparts(csvFilePath);
        end
        Catalog(k).field = 'ID';
        Catalog(k).type = 3;
        for row = 1 : rowCount
            Catalog(k).val(row, 1) = { strcat(idPrefix, '_', num2str(row,'%04.f')) };
        end
        Catalog(k).unit = [];
        Catalog(k).description = 'Event ID';
        Catalog(k).fieldType = [];
        k = 2;
    end

    [columnsInTimeGroup, columnsNotToInclude] = getTimeGroups(column_desc);

    for col=1:colCount
        current_col = column_desc(col);
        if ~current_col.include || ismember(col, columnsNotToInclude)
            continue;
        end

        inputTimeGroup = current_col.inputTimeGroup;
        if ~isempty(current_col.inputTimeGroup)
            timeGroupArray = columnsInTimeGroup(inputTimeGroup);
            for columnInGroup = 2 : length(timeGroupArray)
                current_col.inputTimeFormat = [current_col.inputTimeFormat "-" column_desc(timeGroupArray(columnInGroup)).inputTimeFormat];
            end
        end

        Catalog(k).field = current_col.nameInCatalog;
        Catalog(k).type = current_col.format;
        for row = 1 : rowCount
            rawValue = data{(colCount * (row - noCsvHeaderModifier)) + col, 1};
            if ~isempty(current_col.inputTimeGroup)
                timeGroupArray = columnsInTimeGroup(inputTimeGroup);
                for columnInGroup = 2 : length(timeGroupArray)
                    rawValue = [rawValue "-" data{(colCount * (row - noCsvHeaderModifier)) + timeGroupArray(columnInGroup), 1}];
                end
            end
            if isempty(rawValue)
                if strcmp(current_col.nameInCatalog, 'ID')
                    error('ID of the event cannot be empty (row: %d)', row)
                elseif isText(current_col)
                    Catalog(k).val(row, 1) = {''};
                else
                    Catalog(k).val(row, 1) = NaN;
                end
            else
                parsedValue = parseTextValue(rawValue, current_col.inputType, current_col.inputTimeFormat);
                if strcmp(current_col.format, '5a')
                    Catalog(k).val(row, 1) = { datestr(parsedValue, 'yyyy') };
                elseif strcmp(current_col.format, '5b')
                    Catalog(k).val(row, 1) = { datestr(parsedValue, 'yyyy-mm') };
                else
                    Catalog(k).val(row, 1) = parsedValue;
                end
            end
        end
        Catalog(k).unit = current_col.unit;
        Catalog(k).description = current_col.description;
        Catalog(k).fieldType = current_col.fieldType;
        k=k+1;
    end
    if sortCatalogByTime
        Catalog = sortByTime(Catalog);
    end
end

function containsId = contains_id(column_desc)
    idIdxs = find(strcmp(column_desc(1).nameInCatalog, 'ID'));
    if isempty(idIdxs)
        containsId = 0;
    else
        containsId = column_desc(idIdxs).include;
    end
end
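A sketch of a single 'column_desc' entry and the corresponding call; the CSV file name, column names and the numeric display format are assumptions for illustration only:

col = struct('include', true, 'nameInCsv', 'MAGNITUDE', 'inputType', 'REAL', ...
    'inputTimeFormat', '', 'inputTimeGroup', '', 'nameInCatalog', 'Mw', ...
    'description', 'Moment magnitude', 'format', 12, 'unit', '', 'fieldType', 'Magnitude');
column_desc(1) = col;
% Generate IDs with the prefix 'CAT', skip time sorting (no time column here), and expect a CSV header row.
Catalog = csv2catalog('events.csv', column_desc, 'CAT', false, true);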
@ -1,88 +0,0 @@
% -----------------
% Copyright © 2023 ACK Cyfronet AGH, Poland.
% -----------------

function [gdfFileName] = csv2gdf(csvFilePath, column_desc, description, useCsvHeader)
% DESCRIPTION: Program to create GDF files (in Matlab format) from csv file. Performs a reverse action to the
%   gdf2csv.m script.
% INPUTS:
% - csvFilePath : path to text file with csv data. The separator between columns is defined by ','.
%   Description of the most popular GDF file formats can be found at https://docs.cyfronet.pl/display/ISDOC/GDF
% - column_desc : structure containing information how the gdf should be constructed
% - description : description written into the file

    data = readAndCheckHeaders(csvFilePath, column_desc, useCsvHeader);
    colCount = length(column_desc);
    noCsvHeaderModifier = 1;
    if useCsvHeader
        noCsvHeaderModifier = 0;
    end
    rowCount = length(data) / colCount - 1 + noCsvHeaderModifier;

    [FormatName] = 'GDF';
    [FormatVersion] = 2.1;
    [CRS] = 'n/a';
    [TimeZone] = 'UTC';
    [Description] = description;

    [FieldDescription] = {};
    [FieldType] = {};
    [FieldUnit] = {};
    [d] = struct();

    [columnsInTimeGroup, columnsNotToInclude] = getTimeGroups(column_desc);
    colInGdf = 1; % column number in the generated gdf
    for col=1:colCount
        current_col = column_desc(col);
        if ~current_col.include || ismember(col, columnsNotToInclude)
            continue;
        end

        inputTimeGroup = current_col.inputTimeGroup;
        if ~isempty(current_col.inputTimeGroup)
            timeGroupArray = columnsInTimeGroup(inputTimeGroup);
            for columnInGroup = 2 : length(timeGroupArray)
                current_col.inputTimeFormat = [current_col.inputTimeFormat "-" column_desc(timeGroupArray(columnInGroup)).inputTimeFormat];
            end
        end

        fieldName = current_col.nameInCatalog;
        FieldDescription(colInGdf, 1) = fieldName;
        FieldDescription(colInGdf, 2) = current_col.description;
        FieldType(colInGdf, 1) = fieldName;
        FieldType(colInGdf, 2) = current_col.format;
        FieldUnit(colInGdf, 1) = fieldName;
        FieldUnit(colInGdf, 2) = current_col.unit;
        d.(fieldName) = [];
        for row = 1 : rowCount
            rawValue = data{(colCount * (row - noCsvHeaderModifier)) + col, 1};
            if ~isempty(current_col.inputTimeGroup)
                timeGroupArray = columnsInTimeGroup(inputTimeGroup);
                for columnInGroup = 2 : length(timeGroupArray)
                    rawValue = [rawValue "-" data{(colCount * (row - noCsvHeaderModifier)) + timeGroupArray(columnInGroup), 1}];
                end
            end
            if isempty(rawValue)
                if isText(current_col)
                    d.(fieldName)(row) = {''};
                else
                    d.(fieldName)(row) = NaN;
                end
            else
                parsedValue = parseTextValue(rawValue, current_col.inputType, current_col.inputTimeFormat);
                if strcmp(current_col.format, '5a')
                    d.(fieldName)(row) = { datestr(parsedValue, 'yyyy') };
                elseif strcmp(current_col.format, '5b')
                    d.(fieldName)(row) = { datestr(parsedValue, 'yyyy-mm') };
                else
                    d.(fieldName)(row) = parsedValue;
                end
            end
        end
        colInGdf = colInGdf + 1;
    end

    [~, gdfFileName, ~] = fileparts(csvFilePath);
    save(strcat(gdfFileName, '.mat'), 'FormatName', 'FormatVersion', 'CRS', 'TimeZone', 'Description', ...
        'FieldDescription', 'FieldType', 'FieldUnit', 'd', '-v7')
end
@ -1,23 +0,0 @@
% -----------------
% Copyright © 2023 ACK Cyfronet AGH, Poland.
% -----------------
function [columnsInTimeGroup, columnsNotToInclude] = getTimeGroups(column_desc)
% DESCRIPTION: Script iterating through column_desc and returning column indexes grouped by the same
%   inputTimeGroup. The second output is an array of the indexes of all columns that are not the first
%   in their respective time group.
% INPUTS:
% - column_desc : structure containing definition of the CSV columns and their mapping to the final object
    columnsInTimeGroup = containers.Map();
    columnsNotToInclude = [];

    for i=1:length(column_desc)
        inputTimeGroup = column_desc(i).inputTimeGroup;
        if ~isempty(inputTimeGroup)
            if ~ismember(inputTimeGroup, columnsInTimeGroup.keys)
                columnsInTimeGroup(inputTimeGroup) = [i];
            else
                columnsInTimeGroup(inputTimeGroup) = cat(1, columnsInTimeGroup(inputTimeGroup), i);
                columnsNotToInclude = cat(1, columnsNotToInclude, i);
            end
        end
    end
end
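A small sketch of the grouping behaviour, assuming three hypothetical columns where the second and third share the time group 'origin':

cols(1) = struct('inputTimeGroup', '');        % no time group
cols(2) = struct('inputTimeGroup', 'origin');  % first column of its group, kept
cols(3) = struct('inputTimeGroup', 'origin');  % merged into column 2, excluded
[groups, excluded] = getTimeGroups(cols);
% groups('origin') is [2; 3] and excluded is 3.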
@ -1,10 +0,0 @@
% -----------------
% Copyright © 2023 ACK Cyfronet AGH, Poland.
% -----------------

function isText = isText(col_desc)
% DESCRIPTION: Function checking if the given column is of text type
% INPUTS:
% - col_desc : structure containing information how the column should be constructed
    isText = strcmp(col_desc.inputType, 'TEXT') | strcmp(col_desc.format, '5a') | strcmp(col_desc.format, '5b');
end
@ -1,35 +0,0 @@
% -----------------
% Copyright © 2023 ACK Cyfronet AGH, Poland.
% -----------------

function parsedValue = parseTextValue(rawValue, type, timeFormat)
% DESCRIPTION: Program that parses and returns a value read from text (a cell from a CSV file).
% INPUTS:
% - rawValue : value to parse
% - type : type of the value as defined by CsvColumnContentType.java
% - timeFormat : if the rawValue contains time, this format is used to parse it

    switch type
        case {'REAL', 'INTEGER'}
            try
                parsedValue = str2num(rawValue);
            catch
                error('Cannot parse number input (type: %s): %s', type, rawValue);
            end
            if isempty(parsedValue)
                % we checked that the value is not empty before parsing; if the value is empty
                % here (after parsing), it means it was in a wrong format and could not be parsed
                error('Cannot parse number input (type: %s): %s', type, rawValue);
            end
        case 'TEXT'
            parsedValue = {rawValue};
        case 'DATE_TIME'
            try
                parsedValue = datenum(rawValue, timeFormat);
            catch
                error('Invalid input time format specification or CSV content to parse (%s)', rawValue);
            end
        otherwise
            error('Unexpected input column type %s', type);
    end
end
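A quick sketch of the parser for the three supported input kinds; the sample values and the time format string are assumptions:

x = parseTextValue('4.2', 'REAL', '');      % returns the number 4.2
s = parseTextValue('abc', 'TEXT', '');      % returns the cell {'abc'}
t = parseTextValue('2023-01-02 03:04:05', 'DATE_TIME', 'yyyy-mm-dd HH:MM:SS');  % returns a datenum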
@ -1,33 +0,0 @@
% -----------------
% Copyright © 2023 ACK Cyfronet AGH, Poland.
% -----------------

function data = readAndCheckHeaders(csvFilePath, column_desc, doCheckHeaders)
% DESCRIPTION: Program that reads content from the CSV file, checking if the content matches the headers defined
%   in the column_desc structure. The returned value is a cell with all values from the csv file.
% INPUTS:
% - csvFilePath : path to the CSV file
% - column_desc : structure containing definition of the CSV columns and their mapping to the final object

    fid = fopen(csvFilePath);
    data = textscan(fid, '%q', 'Delimiter', ','){1}; % cell with all values from the csv file
    fclose(fid);
    if doCheckHeaders
        check_headers(data, column_desc);
    end
end

function check_headers(data, column_desc)
    colCount = length(column_desc);
    headers = data(1:colCount);
    for i=1:colCount
        if ~strcmp(column_desc(i).nameInCsv, headers(i))
            error('Expected column %s, but found %s in CSV headers', column_desc(i).nameInCsv, char(headers(i)));
        end
    end

    if mod(length(data), colCount) ~= 0
        error('Improper number of values in one of the rows');
    end
end
@ -1,56 +0,0 @@
% -----------------
% Copyright © 2022 ACK Cyfronet AGH, Poland.
% -----------------
function catalog2csv(catalog, csvFileName)

    fieldNames = getFieldNames(catalog);
    fieldTypes = getFieldTypes(catalog);
    lineFormat = getLineFormat(fieldTypes);
    M = prepareCellMatrix(catalog);

    fid = fopen(csvFileName, 'w+');
    fprintf(fid, '%s\n', strjoin(fieldNames', ','));
    for k=1:size(M,1)
        fprintf(fid, lineFormat, M{k, :});
    end
    fclose(fid);

end

function fieldNames = getFieldNames(catalog)
    fieldNames = [];
    for i=1:length(catalog)
        fieldNames{i} = catalog(i).field;
    end
end

function fieldTypes = getFieldTypes(catalog)
    fieldTypes = [];
    for i=1:length(catalog)
        fieldTypes{i} = catalog(i).type;
    end
end

function M = prepareCellMatrix(catalog)
    M = {};
    for i=1:length(catalog)
        val = catalog(i).val;
        type = catalog(i).type;
        if iscell(val)
            M(:,i) = val;
        elseif isTime(type)
            M(:,i) = formatCatalogTime(val, type);
        else
            M(:,i) = num2cell(val);
        end
    end
end

function timeStrVector = formatCatalogTime(timeVector, fieldType)
    if iscell(timeVector)
        timeVector = cell2mat(timeVector);
    end
    emptyIndexes = isnan(timeVector);
    timeStrVector(~emptyIndexes, 1) = cellstr(datestr(timeVector(~emptyIndexes), 'yyyy-mm-dd HH:MM:SS.FFF'));
    timeStrVector(emptyIndexes, 1) = { 'NaN' };
end
@ -1,16 +0,0 @@
%
% -----------------
% Copyright © 2020 ACK Cyfronet AGH, Poland.
%
% This work was partially funded by EPOS Project funded in frame of PL-POIR4.2
% --------------
%
function [varargout] = extractGdfFields(gdf, fieldNames)

    for i = 1:length(fieldNames)
        fieldName = fieldNames{i};
        assert(isfield(gdf.d, fieldName), ['gdf has no field with name: ' fieldName])
        varargout{i} = getfield(gdf.d, fieldName);
    end

end
@ -1,46 +0,0 @@
% -----------------
% Copyright © 2022 ACK Cyfronet AGH, Poland.
% -----------------
%
% Get string format for the time values provided in timeVector. Format is based
% on numeric 'FieldType' (GDF) or 'type' (catalog) field
% see https://docs.cyfronet.pl/display/ISEPOS/GDF+v2.2+-+description
function timeStrVector = formatTime(timeVector, fieldType)
    % for '5a' or '5b', the date value is a string instead of a number
    if (ischar(fieldType) && (strcmp(fieldType, '5a') || strcmp(fieldType, '5b')))
        timeStrVector = timeVector;
        return;
    end
    if iscell(timeVector)
        timeVector = cell2mat(timeVector);
    end
    emptyIndexes = isnan(timeVector);
    timeFormat = getTimeFormat(fieldType);
    timeStrVector(~emptyIndexes, 1) = cellstr(datestr(timeVector(~emptyIndexes), timeFormat));
    timeStrVector(emptyIndexes, 1) = { 'NaN' };
end

function timeFormat = getTimeFormat(fieldType)
    if (fieldType == 5)
        timeFormat = 'yyyy mmm dd HH:MM:SS.FFF';
    else
        timeFormat = arrayfun(@changeToMatlabFormat, fieldType(2:end));
    end
end

% Matlab uses different specification of the format than the one used in GDF files (based on Java format) - e.g., 'M' is
% used in GDF for month, while in Matlab it is used for minutes
function correctedFormatId = changeToMatlabFormat(javaTimeFormatId)
    switch javaTimeFormatId
        case 'M'
            correctedFormatId = 'm';
        case 'm'
            correctedFormatId = 'M';
        case 's'
            correctedFormatId = 'S';
        case 'S'
            correctedFormatId = 'F';
        otherwise
            correctedFormatId = javaTimeFormatId;
    end
end
@ -1,142 +0,0 @@
% -----------------
% Copyright © 2020 ACK Cyfronet AGH, Poland.
%
% This work was partially funded by EPOS Project funded in frame of PL-POIR4.2
% -----------------
function csvFiles = gdf2csv(gdfFilePath)

    csvFiles = {};
    load(gdfFilePath);
    [~, resultFileNameBase] = fileparts(gdfFilePath);

    fieldNames = fieldnames(d);
    fieldTypes = getFieldTypes(FieldType, fieldNames);

    if (hasSingleData(d, fieldNames))
        if (length(d) > 1)
            M = prepareCellMatrixFromStructArrayWithScalars(d, fieldTypes);
        else
            M = prepareCellMatrixFromSingleStructWithVectors(d, fieldTypes, fieldNames);
        end
        resultFileName = [resultFileNameBase, '.csv'];
        saveCsvFile(M, fieldTypes, fieldNames, resultFileName);
        csvFiles = { resultFileName };
    else
        for i = 1:length(d)
            resultFileName = [resultFileNameBase, '-', num2str(i), '.csv'];
            M = prepareCellMatrixFromSingleStructWithVectors(d(i), fieldTypes, fieldNames);
            saveCsvFile(M, fieldTypes, fieldNames, resultFileName);
            csvFiles{i} = resultFileName;
        end
    end
end

function isSingle = hasSingleData(d, fieldNames)
    isSingle = length(d) == 1 || ~hasAnyVectors(d, fieldNames);
end

function hasVector = hasAnyVectors(d, fieldNames)
    hasVector = false;
    for i = 1:length(d)
        vectorValue = findFirstVectorValue(d(i), fieldNames);
        if ~isempty(vectorValue)
            hasVector = true;
            return;
        end
    end
end

function vectorValue = findFirstVectorValue(d, fieldNames)
    vectorValue = [];
    for i = 1:length(fieldNames)
        value = d.(fieldNames{i});
        if ~ischar(value) && ~isscalar(value)
            vectorValue = value;
            return;
        end
    end
end

function saveCsvFile(M, fieldTypes, fieldNames, filename)
    lineFormat = getLineFormat(fieldTypes);
    fid = fopen(filename, 'w+');
    fprintf(fid, '%s\n', strjoin(fieldNames', ','));
    for k=1:size(M,1)
        fprintf(fid, lineFormat, M{k, :});
    end
    fclose(fid);
end

function M = prepareCellMatrixFromSingleStructWithVectors(d, fieldTypes, fieldNames)
    % some structures might contain vectors mixed with scalars, in that case we want to repeat the scalar values in the csv file
    d = convertScalarsToVectors(d, fieldNames);
    M = cell(length(d.(fieldNames{1})), length(fieldNames));
    for f = 1:length(fieldTypes)
        fieldType = fieldTypes{f};
        field = d.(fieldNames{f});
        if (isTime(fieldType))
            M(:, f) = formatTime(field, fieldType);
        elseif isnumeric(field)
            if (isrow(field))
                field = field';
            end
            M(:, f) = num2cell(field);
        else
            if (isrow(field))
                field = field';
            end
            M(:, f) = field;
        end
    end
end

function M = prepareCellMatrixFromStructArrayWithScalars(d, fieldTypes)
    M = squeeze(struct2cell(d))';
    M = cellfun(@handleEmptyArray, M, 'UniformOutput', false);
    for f = 1:length(fieldTypes)
        fieldType = fieldTypes{f};
        if (isTime(fieldType))
            M(:, f) = formatTime(M(:, f), fieldType);
        end
    end
end

function struct = convertScalarsToVectors(d, fieldNames)
    struct = d;
    firstVectorValue = findFirstVectorValue(d, fieldNames);
    if isempty(firstVectorValue)
        return;
    end
    count = length(firstVectorValue);
    for i = 1:length(fieldNames)
        field = struct.(fieldNames{i});
        if (isempty(field))
            field = nan;
        end
        if ischar(field) || isscalar(field)
            [vectorValue{1:count}] = deal(field);
            struct.(fieldNames{i}) = vectorValue';
        end
    end
end

function value = handleEmptyArray(value)
    if (isempty(value))
        value = nan;
    end
end

% creates a vector of field types written as in FieldType, but preserving the same order of fields as in the 'd'
% structure (fallback for a situation when the types in FieldType are in a different order than fieldnames(d) or when
% FieldType contains more entries than fieldnames(d))
function fieldTypes = getFieldTypes(fieldTypeCell, fieldNames)
    fieldTypes = cell(1, length(fieldNames));
    for i = 1:length(fieldNames)
        for j = 1:size(fieldTypeCell, 1)
            if strcmp(fieldNames{i}, fieldTypeCell{j, 1})
                fieldTypes{i} = fieldTypeCell{j, 2};
            end
        end
    end
end
@ -1,54 +0,0 @@
% -----------------
% Copyright © 2022 ACK Cyfronet AGH, Poland.
% -----------------
%
% Get string format specification used for fprintf, based on numeric 'FieldType' (GDF) or 'type' (catalog) field
% see https://docs.cyfronet.pl/display/ISEPOS/GDF+v2.2+-+description
function lineFormat = getLineFormat(fieldTypes)
    lineFormat = '';
    for f = 1:length(fieldTypes)
        fieldType = fieldTypes{f};
        formatSpec = getFormatSpec(fieldType);
        lineFormat = [lineFormat ',' formatSpec];
    end
    lineFormat = [lineFormat(2:end) '\n'];
end

function formatSpec = getFormatSpec(fieldType)
    if isTime(fieldType)
        formatSpec = '"%s"';
    elseif isnumeric(fieldType)
        if fieldType < 10
            formatSpec = getFormatSpecFromReservedFieldType(fieldType);
        elseif fieldType < 200
            formatSpec = ['%.', num2str(mod(fieldType, 10)), 'f'];
        elseif fieldType < 300
            formatSpec = ['%.', num2str(mod(fieldType, 10)), 'e'];
        else
            error(['Unrecognized fieldType: ', num2str(fieldType)])
        end
    else
        error(['Unrecognized fieldType: ', num2str(fieldType)])
    end
end

function formatSpec = getFormatSpecFromReservedFieldType(fieldType)
    switch fieldType
        case 1
            formatSpec = '%f';
        case 2
            formatSpec = '%d';
        case 3
            formatSpec = '"%s"';
        case 4
            formatSpec = '%.1f';
        case 5
            formatSpec = '%s';
        case 6
            formatSpec = '%1.1e';
        case 7
            formatSpec = '%1.2e';
        otherwise
            error(['Unrecognized fieldType: ', num2str(fieldType)])
    end
end
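A small sketch of how the field types above map to one fprintf line format, assuming isTime.m is on the path:

% type 3 -> quoted string, type 12 -> two decimal places, type 203 -> three-digit exponent format
fmt = getLineFormat({3, 12, 203});
% fmt is '"%s",%.2f,%.3e\n', ready to be used as the per-row fprintf format.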
@ -1,10 +0,0 @@
% -----------------
% Copyright © 2022 ACK Cyfronet AGH, Poland.
% -----------------
%
% Check if the format defined in 'fieldType' marks time or another type of field.
% Based on numeric 'FieldType' (GDF) or 'type' (catalog) field
% see https://docs.cyfronet.pl/display/ISEPOS/GDF+v2.2+-+description
function isTime = isTime(fieldType)
    isTime = fieldType == 5 || fieldType(1) == '5';
end
@ -1,340 +0,0 @@
# matlab/export//catalog2csv.m
# matlab/export//extractGdfFields.m
# matlab/export//formatTime.m
# matlab/export//gdf2csv.m
# matlab/export//getLineFormat.m
# matlab/export//isTime.m
@ -1,186 +0,0 @@
% function [msyn,a,b,sa,sb,cc,rms,N]=LinReg(m1,m2,opt,n)
% -----------------------------------------------------------------------------------------------
% The program calculates the linear regression parameters (a and b)
% and their uncertainties by applying different regression techniques.
% -----
% The program calls the following functions, appended in the script:
% function [a,b,sa,sb,rms,N]=GOR(m1,m2,n) - General Orthogonal Regression
% function [a b sa sb rms]=OLS(m1,m2) - Ordinary Least Squares
% function [a b sa sb rms]=ILS(m1,m2) - Inverted Least Squares
% -----------------------------------------------------------------------------------------------
% COMPILED BY:
% Leptokaropoulos Konstantinos, June 2014, (kleptoka@igf.edu.pl) within IS-EPOS Project,
% ------------------------------------------------------------------------------------------------
% DESCRIPTION
% The program is used to establish relationships between different magnitude scales.
% These relationships can then be used for converting diverse magnitude scales into a
% common one, such that a homogeneous magnitude is applied for further analyses.
% The suggested method is GOR, because ordinary least squares assumes that
% there are no uncertainties in the values of the independent variable, a condition that
% is never true when dealing with earthquake magnitudes. This may introduce
% systematic errors in magnitude conversion, apparent catalog incompleteness, and
% significant bias in the estimates of the b-value. According to the GOR technique, the
% projection of the independent variable is done along a weighted orthogonal distance
% from the linear fitting curve. Although the error variance ratio of the two variables
% is introduced in the calculations, Castellaro et al. (2006) showed that even if the applied
% values differ from the real ones, the GOR method still performs better than OLS.
% Despite the GOR superiority, OLS and ILS are also provided as supplementary
% options in the present program, for comparison.
% ------------------------------------------------------------------------------------------------
% INPUT PARAMETERS:
% m1 - variable 1 (e.g. magnitude 1) [VECTOR]
% m2 - variable 2 (e.g. magnitude 2) [VECTOR]
% opt - regression method selection (1, 2 or 3): [SCALAR] (Default=1)
%       1 - General Orthogonal Regression - GOR function
%       2 - Ordinary Least Squares - OLS function
%       3 - Inverted Least Squares - ILS function
% n - error variance ratio (var(m2)/var(m1)) [SCALAR] (for GOR only, Default=1)
% ------------------------------------------------------------------------------------------------
% OUTPUT:
% msyn - Converted Magnitude [VECTOR]
% a - intercept [SCALAR]
% b - slope [SCALAR]
% sa - 95% confidence interval for intercept [SCALAR]
% sb - 95% confidence interval for slope [SCALAR]
% cc - Pearson correlation coefficient between vectors m1 and m2 [SCALAR-optional]
% rms - root mean square error [SCALAR-optional]
% N - number of data pairs m1,m2 [SCALAR-optional]
% ---------------------------------------------------------------------------------------------
% REFERENCES
% **** (for General Orthogonal Regression) ****
% Fuller, W. A. (1987). Measurement Error Models, Wiley, New York, 440 pp.
% Castellaro, S., F. Mulargia, and Y. Y. Kagan (2006). Regression problems for magnitudes,
% Geophys. J. Int. 165, 913–930.
% Lolli, B., and P. Gasperini (2012). A comparison among general orthogonal regression methods
% applied to earthquake magnitude conversions, Geophys. J. Int. 190, 1135–1151.
% Wason, H. R., R. Das, and M. L. Sharma (2012). Magnitude conversion problem using general
% orthogonal regression, Geophys. J. Int. 190, 1091–1096.
% ----------------------------------------------------------------------------------------------
% LICENSE
% This file is a part of the IS-EPOS e-PLATFORM.
%
% This is free software: you can redistribute it and/or modify it under
% the terms of the GNU General Public License as published by the Free
% Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
% ------------------------------------------------------------------------------------------------

function [msyn,a,b,sa,sb,cc,rms,N]=LinReg(m1,m2,opt,n)

%%%%% Remove NaN values - K03JUN2015 %%%%%
mm1=m1(isnan(m1)==0 & isnan(m2)==0);
mm2=m2(isnan(m1)==0 & isnan(m2)==0);
m1=mm1; m2=mm2;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%

if nargin==2, opt=1; n=1;
elseif nargin==3, n=1; end

% CC
cc=corrcoef(m1,m2);
cc=cc(2);
N=length(m1);

if opt==1
    [a,b,sa,sb,rms]=GOR(m1,m2,n);
elseif opt==2
    [a,b,sa,sb,rms]=OLS(m1,m2);
elseif opt==3
    [a,b,sa,sb,rms]=ILS(m1,m2);
end
msyn=m1*b+a;
end

% ********************************************************
% ------------------------- GOR --------------------------------
function [a,b,sa,sb,rms,N]=GOR(m1,m2,n)

if nargin==2 || isempty(n), n=1; end

% GOR parameters
f=1; % alternative f=0
cova=cov(m1,m2,f);
mx=var(m1,f);
my=var(m2,f);
mxy=cova(1,2);

b=(my-n*mx+sqrt((my-n*mx)^2+4*n*mxy^2))/(2*mxy);
a=mean(m2)-b*mean(m1);

% BASIC STATISTICS
msyn=b*m1+a;
rms=sqrt(sum((m2-msyn).^2)/(length(m2)));
N=length(m1);

% Confidence intervals of the a and b values
d=(my-n*mx);

sx=(sqrt(d^2+4*n*mxy^2)-d)/(2*n);
sy=(my+n*mx-sqrt(d^2+4*n*mxy^2))/(2*n);

s=((N-1)*(n+b^2)*sy)/(N-2);

Vb=(mx*s-(b*sy)^2)/((N-1)*sx^2);
Va=s/N+Vb*mean(m1)^2;

sb=sqrt(Vb)*tinv(0.975,N-2);
sa=sqrt(Va)*tinv(0.975,N-2);
end

% ----------------------------- OLS ----------------------------
function [a b sa sb rms]=OLS(m1,m2)
f=1; N=length(m1);
cova=cov(m1,m2,f);
mx=var(m1,f);
my=var(m2,f);
mxy=cova(1,2);

p=polyfit(m1,m2,1);

vb=my/sum((m1-mean(m1)).^2);
va=my*sum(m1.^2)/(N*sum((m1-mean(m1)).^2));
sb=tinv(0.975,N-2)*sqrt(vb);
sa=tinv(0.975,N-2)*sqrt(va);
b=p(1); a=p(2);

msyn=b*m1+a;
rms=sqrt(sum((m2-msyn).^2)/(length(m2)));
end

% ----------------------------- ILS ----------------------------
function [a b sa sb rms]=ILS(m1,m2)
f=1; N=length(m1);
cova=cov(m1,m2,f);
mx=var(m2,f);
my=var(m1,f);
mxy=cova(1,2);

p=polyfit(m2,m1,1);
q=[1/p(1) -p(2)/p(1)];

vb=my/sum((m2-mean(m2)).^2);
va=my*sum(m2.^2)/(N*sum((m2-mean(m2)).^2));
sb=tinv(0.975,N-2)*sqrt(vb);
sa=tinv(0.975,N-2)*sqrt(va);

sq1=abs(sb*q(1)/p(1));
sq2=abs(sb*q(2)/p(2));
b=q(1); a=q(2); sb=sq1; sa=sq2;

msyn=m1*b+a;
rms=sqrt(sum((m2-msyn).^2)/(length(m2)));
end

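The snippet below is a hypothetical smoke test, not part of the repository: it builds two synthetic magnitude scales with noise on both sides and compares the three regression options exposed by LinReg. All variable names are illustrative only.

% Hypothetical smoke test for LinReg (synthetic data, illustrative only).
m  = 0.5 + 3*rand(1000,1);                   % underlying "true" event size
m1 = m + 0.1*randn(1000,1);                  % magnitude scale 1, with measurement noise
m2 = 0.15 + 1.05*m + 0.1*randn(1000,1);      % magnitude scale 2, with measurement noise
[msyn, a, b, sa, sb] = LinReg(m1, m2, 1, 1); % GOR with error variance ratio n = 1
[~, a_ols, b_ols] = LinReg(m1, m2, 2);       % OLS, for comparison
[~, a_ils, b_ils] = LinReg(m1, m2, 3);       % ILS, for comparison
fprintf('GOR: a=%.3f b=%.3f  OLS: a=%.3f b=%.3f  ILS: a=%.3f b=%.3f\n', ...
    a, b, a_ols, b_ols, a_ils, b_ils);
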
@ -1,32 +0,0 @@
%
% -----------------
% Copyright © 2019 ACK Cyfronet AGH, Poland.
%
% This work was partially funded by EPOS Project funded in frame of PL-POIR4.2
% --------------
%
function [msyn, outData, m1, m2] = LinRegWrapper(catalog, m1Type, m2Type, opt, n)
    [m1, m2] = extractColumns(catalog, m1Type, m2Type);
    [m1, m2] = eliminateEmptyValues(m1, m2);
    if isLogarithmComparableToMagnitude(m1Type); m1 = log10(m1); end
    if isLogarithmComparableToMagnitude(m2Type); m2 = log10(m2); end

    if exist('OCTAVE_VERSION', 'builtin') > 0
        [msyn, a, b, sa, sb, cc, rms, N] = LinReg_O(m1, m2, opt, n);
    else
        [msyn, a, b, sa, sb, cc, rms, N] = LinReg(m1, m2, opt, n);
    end

    outData.a = a;
    outData.b = b;
    outData.sa = sa;
    outData.sb = sb;
    outData.cc = cc;
    outData.rms = rms;
    outData.N = N;
end

function result = isLogarithmComparableToMagnitude(columnType)
    result = strcmp(columnType, 'E') || strcmp(columnType, 'M0');
end

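A hedged usage sketch for the wrapper above, assuming a catalog structure that contains 'Ml' and 'Mw' columns; the helpers extractColumns and eliminateEmptyValues are not shown here and are presumably provided elsewhere in the repository. The call itself is only an illustration.

% Hypothetical call: fit Mw against Ml with GOR (opt = 1) and error variance ratio n = 1.
[msyn, fit] = LinRegWrapper(catalog, 'Ml', 'Mw', 1, 1);
fprintf('Mw ~ %.3f * Ml + %.3f  (+/- %.3f on the slope, %d pairs)\n', ...
    fit.b, fit.a, fit.sb, fit.N);
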
@ -1,186 +0,0 @@
% function [msyn,a,b,sa,sb,cc,rms,N]=LinReg_O(m1,m2,opt,n) - Octave Compatible Version
% -----------------------------------------------------------------------------------------------
% The program calculates the linear regression parameters (a and b)
% and their uncertainties by applying different regression techniques.
% -----
% The program calls the following functions, appended in the script:
% function [a,b,sa,sb,rms,N]=GOR(m1,m2,n) - General Orthogonal Regression
% function [a b sa sb rms]=OLS(m1,m2) - Ordinary Least Squares
% function [a b sa sb rms]=ILS(m1,m2) - Inverted Least Squares
% -----------------------------------------------------------------------------------------------
% COMPILED BY:
% Leptokaropoulos Konstantinos, June 2014, (kleptoka@igf.edu.pl) within IS-EPOS Project,
% ------------------------------------------------------------------------------------------------
% DESCRIPTION
% The program is used to establish relationships between different magnitude scales.
% These relationships can then be used for converting diverse magnitude scales into a
% common one, such that a homogeneous magnitude is applied for further analyses.
% The suggested method is GOR, because ordinary least squares assumes that
% there are no uncertainties in the values of the independent variable, a condition that
% is never true when dealing with earthquake magnitudes. This may introduce
% systematic errors in magnitude conversion, apparent catalog incompleteness, and
% significant bias in the estimates of the b-value. According to the GOR technique, the
% projection of the independent variable is done along a weighted orthogonal distance
% from the linear fitting curve. Although the error variance ratio of the two variables
% is introduced in the calculations, Castellaro et al. (2006) showed that even if the applied
% values differ from the real ones, the GOR method still performs better than OLS.
% Despite the GOR superiority, OLS and ILS are also provided as supplementary
% options in the present program, for comparison.
% ------------------------------------------------------------------------------------------------
% INPUT PARAMETERS:
% m1 - variable 1 (e.g. magnitude 1) [VECTOR]
% m2 - variable 2 (e.g. magnitude 2) [VECTOR]
% opt - regression method selection (1, 2 or 3): [SCALAR] (Default=1)
%       1 - General Orthogonal Regression - GOR function
%       2 - Ordinary Least Squares - OLS function
%       3 - Inverted Least Squares - ILS function
% n - error variance ratio (var(m2)/var(m1)) [SCALAR] (for GOR only, Default=1)
% ------------------------------------------------------------------------------------------------
% OUTPUT:
% msyn - Converted Magnitude [VECTOR]
% a - intercept [SCALAR]
% b - slope [SCALAR]
% sa - 95% confidence interval for intercept [SCALAR]
% sb - 95% confidence interval for slope [SCALAR]
% cc - Pearson correlation coefficient between vectors m1 and m2 [SCALAR-optional]
% rms - root mean square error [SCALAR-optional]
% N - number of data pairs m1,m2 [SCALAR-optional]
% ---------------------------------------------------------------------------------------------
% REFERENCES
% **** (for General Orthogonal Regression) ****
% Fuller, W. A. (1987). Measurement Error Models, Wiley, New York, 440 pp.
% Castellaro, S., F. Mulargia, and Y. Y. Kagan (2006). Regression problems for magnitudes,
% Geophys. J. Int. 165, 913–930.
% Lolli, B., and P. Gasperini (2012). A comparison among general orthogonal regression methods
% applied to earthquake magnitude conversions, Geophys. J. Int. 190, 1135–1151.
% Wason, H. R., R. Das, and M. L. Sharma (2012). Magnitude conversion problem using general
% orthogonal regression, Geophys. J. Int. 190, 1091–1096.
% ----------------------------------------------------------------------------------------------
% LICENSE
% This file is a part of the IS-EPOS e-PLATFORM.
%
% This is free software: you can redistribute it and/or modify it under
% the terms of the GNU General Public License as published by the Free
% Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
% ------------------------------------------------------------------------------------------------

function [msyn,a,b,sa,sb,cc,rms,N]=LinReg_O(m1,m2,opt,n)

%%%%% Remove NaN values - K03JUN2015 %%%%%
mm1=m1(isnan(m1)==0 & isnan(m2)==0);
mm2=m2(isnan(m1)==0 & isnan(m2)==0);
m1=mm1; m2=mm2;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%

if nargin==2, opt=1; n=1;
elseif nargin==3, n=1; end

% CC
cc=corr(m1,m2);
N=length(m1);

if opt==1
    [a,b,sa,sb,rms]=GOR(m1,m2,n);
elseif opt==2
    [a,b,sa,sb,rms]=OLS(m1,m2);
elseif opt==3
    [a,b,sa,sb,rms]=ILS(m1,m2);
end
msyn=m1*b+a;
end

% ********************************************************
% ------------------------- GOR --------------------------------
function [a,b,sa,sb,rms,N]=GOR(m1,m2,n)

if nargin==2 || isempty(n), n=1; end

% GOR parameters
f=1; % alternative f=0
mx=var(m1,f);
my=var(m2,f);
mxy=cov(m1,m2,f);
%mxy=mxy(2); Enable for Matlab version

b=(my-n*mx+sqrt((my-n*mx)^2+4*n*mxy^2))/(2*mxy);
a=mean(m2)-b*mean(m1);

% BASIC STATISTICS
msyn=b*m1+a;
rms=sqrt(sum((m2-msyn).^2)/(length(m2)));
N=length(m1);

% Confidence intervals of the a and b values
d=(my-n*mx);

sx=(sqrt(d^2+4*n*mxy^2)-d)/(2*n);
sy=(my+n*mx-sqrt(d^2+4*n*mxy^2))/(2*n);

s=((N-1)*(n+b^2)*sy)/(N-2);

Vb=(mx*s-(b*sy)^2)/((N-1)*sx^2);
Va=s/N+Vb*mean(m1)^2;

sb=sqrt(Vb)*tinv(0.975,N-2);
sa=sqrt(Va)*tinv(0.975,N-2);
end

% ----------------------------- OLS ----------------------------
function [a b sa sb rms]=OLS(m1,m2)
f=1; N=length(m1);
mxy=cov(m1,m2,f);
mx=var(m1,f);
my=var(m2,f);

p=polyfit(m1,m2,1);

vb=my/sum((m1-mean(m1)).^2);
va=my*sum(m1.^2)/(N*sum((m1-mean(m1)).^2));
sb=tinv(0.975,N-2)*sqrt(vb);
sa=tinv(0.975,N-2)*sqrt(va);
b=p(1); a=p(2);

msyn=b*m1+a;
rms=sqrt(sum((m2-msyn).^2)/(length(m2)));
end

% ----------------------------- ILS ----------------------------
function [a b sa sb rms]=ILS(m1,m2)
f=1; N=length(m1);
mxy=cov(m1,m2,f);
mx=var(m1,f);
my=var(m2,f);

p=polyfit(m2,m1,1);
q=[1/p(1) -p(2)/p(1)];

vb=my/sum((m2-mean(m2)).^2);
va=my*sum(m2.^2)/(N*sum((m2-mean(m2)).^2));
sb=tinv(0.975,N-2)*sqrt(vb);
sa=tinv(0.975,N-2)*sqrt(va);

sq1=abs(sb*q(1)/p(1));
sq2=abs(sb*q(2)/p(2));
b=q(1); a=q(2); sb=sq1; sa=sq2;

msyn=b*m1+a;
rms=sqrt(sum((m2-msyn).^2)/(length(m2)));
end

% Testing plotting
%plot(c.Ml,c.Mw,'ko')
%hold on
%plot([min(c.Ml) max(c.Ml)],[min(c.Ml)*b+a max(c.Ml)*b+a],'r--')

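The main differences from LinReg.m above are the use of corr in place of corrcoef and Octave's scalar return from cov(x,y), as noted in the file's comments. If a single code path for both interpreters were ever preferred, a small helper along these lines could hide that difference; this is a sketch under that assumption, not code from the repository.

% Hypothetical helper: population cross-covariance (the f = 1 convention used above),
% computed explicitly so the result is a scalar in both MATLAB and Octave.
function c = crossCovariance(x, y)
    x = x(:); y = y(:);
    c = mean((x - mean(x)) .* (y - mean(y)));
end
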
@ -1,287 +0,0 @@
function F=readsac(files)
% F=readsac('files')
%
% Read a SAC-file or a set of SAC-files.
%
% Input :
% 'files' corresponds either to the complete name of one SAC-file,
% or to a set of SAC-files selected with the wildcard expression *.
% Output :
% F is a structure (length equal to the number of SAC-files read).
% It contains ALL the fields of the SAC's header and the trace in
% the field "trace".
%
% EXAMPLE :
% F=readsac('2004.02.23-17.31.00.ABH.SHZ.SAC')
% reads the file 2004.02.23-17.31.00.ABH.SHZ.SAC and saves it into
% the structure F (dimension: 1*1).
% F=readsac('Data/*BHZ*.SAC')
% reads all the files of the form *BHZ*.SAC in the directory "Data".
% The size of F equals the number of these files.
%
% From J. Vergne and G. Hetenyi

%------------------------------------------------------------------
% By default, the signals are read in mode 'r'
modlect='r';

% Determine the type of "files"
rep_files=fileparts(files);
list_files=dir(files);

if length(list_files)==0
    % disp('"readsac": File(s) do not exist');
    F.tau=-1;
else

    for ifile=1:length(list_files)

        nomfich=fullfile(rep_files,list_files(ifile).name);
        clear dsac;

        % Read signals
        % ------------
        % Depending on the signal type, the reading procedure can differ:
        % 'l' - IEEE floating point with little-endian byte ordering
        % 'b' - IEEE floating point with big-endian byte ordering

        [C,MAXSIZE,ENDIAN]=computer;
        if ENDIAN=='L', bool_l=0;
        else bool_l=1;
        end

        fidev=fopen(nomfich,modlect,'l');
        if fidev > 0

            h1=fread(fidev,70,'float');  % --------------
            h2=fread(fidev,40,'long');   % reading header
            h3=fread(fidev,192,'uchar'); % --------------

            % testing byte order: h2(7) must be 6
            if h2(7)~=6
                bool_l=1;
                fclose(fidev);
                fidev=fopen(nomfich,modlect,'b');
                h1=fread(fidev,70,'float');
                h2=fread(fidev,40,'long');
                h3=fread(fidev,192,'uchar');
            end

            % reading trace
            tamp=fread(fidev,inf,'float');

            dsac.h1=h1;
            dsac.h2=h2;
            dsac.h3=h3;

            % PART 1: reading float-type parameters
            % ------------------------------------------
            % %- comments are from the original version,
            % %  comments are from the SAC homepage

            dsac.delta=h1(1);   %- sampling time interval
            dsac.depmin=h1(2);  % minimum value of dependent variable
            dsac.depmax=h1(3);  % maximum value of dependent variable
            dsac.scale=h1(4);   % multiplying scale factor for dependent variable (not currently used)
            dsac.odelta=h1(5);  % observed increment if different from nominal value

            dsac.b=h1(6);       %- begin time (offset of the first sample) (beginning value of independent variable)
            dsac.e=h1(7);       % ending value of independent variable
            dsac.o=h1(8);       %- event origin time (seconds relative to reference time)
            dsac.a=h1(9);       % first arrival time (seconds relative to reference time)
            dsac.internal10=h1(10); % INTERNAL

            dsac.t0=h1(11);     %- user defined time picks or markers
            dsac.t1=h1(12);     %- (seconds relative to reference time)
            dsac.t2=h1(13);     %-
            dsac.t3=h1(14);     %-
            dsac.t4=h1(15);     %-
            dsac.t5=h1(16);     %
            dsac.t6=h1(17);     %
            dsac.t7=h1(18);     %
            dsac.t8=h1(19);     %
            dsac.t9=h1(20);     %

            dsac.f=h1(21);      % fini or end of event time (seconds relative to reference time)
            dsac.resp0=h1(22);  % instrument response parameters (not currently used)
            dsac.resp1=h1(23);  %
            dsac.resp2=h1(24);  %
            dsac.resp3=h1(25);  %
            dsac.resp4=h1(26);  %
            dsac.resp5=h1(27);  %
            dsac.resp6=h1(28);  %
            dsac.resp7=h1(29);  %
            dsac.resp8=h1(30);  %
            dsac.resp9=h1(31);  %

            dsac.stla=h1(32);   %- station latitude (degrees, north positive)
            dsac.stlo=h1(33);   %- station longitude (degrees, east positive)
            dsac.stel=h1(34);   %- station elevation (meters)
            dsac.stdp=h1(35);   % station depth below surface (meters)(not currently used)

            dsac.evla=h1(36);   %- event latitude (degrees, north positive)
            dsac.evlo=h1(37);   %- event longitude (degrees, east positive)
            dsac.evel=h1(38);   % event elevation (meters)(not currently used)
            dsac.evdp=h1(39);   %- event depth below surface (meters)
            dsac.mag=h1(40);    % event magnitude

            dsac.user0=h1(41);  %- user defined variable storage area, floating
            dsac.user1=h1(42);  %-
            dsac.user2=h1(43);  %-
            dsac.user3=h1(44);  %
            dsac.user4=h1(45);  %
            dsac.user5=h1(46);  %
            dsac.user6=h1(47);  %
            dsac.user7=h1(48);  %
            dsac.user8=h1(49);  %
            dsac.user9=h1(50);  %

            dsac.dist=h1(51);   %- station to event distance (km)
            dsac.az=h1(52);     %- event to station azimuth (degrees)
            dsac.baz=h1(53);    %- station to event azimuth (degrees)
            dsac.gcarc=h1(54);  %- station to event great circle arc length (degrees)
            dsac.internal55=h1(55); % INTERNAL

            dsac.internal56=h1(56); % INTERNAL
            dsac.depmen=h1(57); % mean value of dependent variable
            dsac.cmpaz=h1(58);  %- component azimuth (degrees clockwise from north)
            dsac.cmpinc=h1(59); %- component incidence angle (degrees from vertical)

            dsac.xminimum=h1(60); % minimum value of X (spectral files only)
            dsac.xmaximum=h1(61); % maximum value of X (spectral files only)
            dsac.yminimum=h1(62); % minimum value of Y (spectral files only)
            dsac.ymaximum=h1(63); % maximum value of Y (spectral files only)
            dsac.unused64=h1(64); % UNUSED
            dsac.unused65=h1(65); % UNUSED
            dsac.unused66=h1(66); % UNUSED
            dsac.unused67=h1(67); % UNUSED
            dsac.unused68=h1(68); % UNUSED
            dsac.unused69=h1(69); % UNUSED
            dsac.unused70=h1(70); % UNUSED

            % PART 2: reading long-type parameters
            % ------------------------------------

            % GMT time corresponding to reference (0) time in file
            dsac.nzyear=h2(1);  %- year
            dsac.nzjday=h2(2);  %- julian day
            dsac.nzhour=h2(3);  %- hour
            dsac.nzmin=h2(4);   %- minute
            dsac.nzsec=h2(5);   % second
            dsac.nzmsec=h2(6);  % millisecond
            dsac.sec=h2(5)+h2(6)/1000; %- full second (NOT an original SAC attribute!)

            dsac.nvhdr=h2(7);   % header version number: 6!
            dsac.norid=h2(8);   % origin ID (CSS 3.0)
            dsac.nevid=h2(9);   % event ID (CSS 3.0)

            dsac.npts=h2(10);   %- number of points per data component

            dsac.internal81=h2(11); % INTERNAL
            dsac.nwfid=h2(12);  % waveform ID (CSS 3.0)
            dsac.nxsize=h2(13); % spectral length (spectral files only)
            dsac.nysize=h2(14); % spectral width (spectral files only)
            dsac.unused85=h2(15); % UNUSED

            dsac.iftype=h2(16); % type of file (required)(see SAC-page)
            dsac.idep=h2(17);   % type of dependent variable (see SAC-page)
            dsac.iztype=h2(18); % reference time equivalence (see SAC-page)
            dsac.unused89=h2(19); % UNUSED
            dsac.iinst=h2(20);  % type of recording instrument (not currently used)

            dsac.istreg=h2(21); % station geographic region (not currently used)
            dsac.ievreg=h2(22); % event geographic location (not currently used)
            dsac.ievtyp=h2(23); % type of event (see SAC-page)
            dsac.iqual=h2(24);  % quality of data (not currently used)(see SAC-page)
            dsac.isynth=h2(25); % synthetic data flag (not currently used)

            dsac.imagtyp=h2(26); % magnitude type
            dsac.imagsrc=h2(27); % source of magnitude information
            dsac.unused98=h2(28); % UNUSED
            dsac.unused99=h2(29); % UNUSED
            dsac.unused100=h2(30); % UNUSED

            dsac.unused101=h2(31); % UNUSED
            dsac.unused102=h2(32); % UNUSED
            dsac.unused103=h2(33); % UNUSED
            dsac.unused104=h2(34); % UNUSED
            dsac.unused105=h2(35); % UNUSED

            dsac.leven=h2(36);  % TRUE if data is evenly spaced
            dsac.lspol=h2(37);  % TRUE if station components have positive polarity (left-hand rule)
            dsac.lovrok=h2(38); % TRUE if it is okay to overwrite this file on disk
            dsac.lcalda=h2(39); % TRUE if DIST,AZ,BAZ and GCARC are to be calculated from station and event coordinates
            dsac.unused110=h2(40); % UNUSED

            % PART 3: reading uchar-type parameters
            % -------------------------------------

            imin1=min(find(h3(1:8)==0 | h3(1:8)==32));
            imin2=min(find(h3(9:24)==0 | h3(9:24)==32));

            dsac.kstnm=rm_blanc(h3(1:1+imin1-1))'; %- station name
            dsac.kevnm=rm_blanc(h3(9:9+imin2-1))'; %- event name

            dsac.khole=rm_blanc(h3(25:32))'; % hole identification if nuclear event
            dsac.ko=rm_blanc(h3(33:40))';    % event origin time identification
            dsac.ka=rm_blanc(h3(41:48))';    % first arrival time identification

            dsac.kt0=rm_blanc(h3(49:56))';   %- user defined time pick identifications, h1(11:20)
            dsac.kt1=rm_blanc(h3(57:64))';   %-
            dsac.kt2=rm_blanc(h3(65:72))';   %-
            dsac.kt3=rm_blanc(h3(73:80))';   %-
            dsac.kt4=rm_blanc(h3(81:88))';   %-
            dsac.kt5=rm_blanc(h3(89:96))';   %
            dsac.kt6=rm_blanc(h3(97:104))';  %
            dsac.kt7=rm_blanc(h3(105:112))'; %
            dsac.kt8=rm_blanc(h3(113:120))'; %
            dsac.kt9=rm_blanc(h3(121:128))'; %

            dsac.kf=rm_blanc(h3(129:136))';  % fini identification

            dsac.kuser0=rm_blanc(h3(137:144))'; %- user defined variable storage area
            dsac.kuser1=rm_blanc(h3(145:152))'; %-
            dsac.kuser2=rm_blanc(h3(153:160))'; %-

            dsac.kcmpnm=rm_blanc(h3(161:168))'; %- component name
            dsac.knetwk=rm_blanc(h3(169:176))'; % name of seismic network
            dsac.kdatrd=rm_blanc(h3(177:184))'; % date the data was read onto the computer

            dsac.kinst=rm_blanc(h3(185:192))';  %- generic name of recording instrument

            % reading trace
            % -------------

            dsac.trace=tamp;

            dsac.tau = 1; % Added to check if the file exists
            fclose(fidev);

        else
            %disp(['"readsac": file ' nomfich ' : reading error - file does not exist'])
            dsac.tau=-1;
        end
        F(ifile)=dsac;
    end

end

% -------------------------------------------------------------

function ch1=rm_blanc(ch)

% looks for whitespace characters in 'ch' and removes them
if ischar(ch)
    ch1=ch(find(double(ch)~=32 & double(ch)~=0));
else
    ch1=ch(find(ch~=32 & ch~=0));
    ch1=char(ch1);
end

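A short usage sketch, not part of the repository: the file name is taken from the EXAMPLE in the header above, and the plotting is illustrative only.

% Hypothetical usage: read one SAC file and plot its trace against time
% rebuilt from the header fields.
F = readsac('2004.02.23-17.31.00.ABH.SHZ.SAC');
if F.tau == 1                               % tau == -1 marks a failed read
    t = F.b + (0:F.npts-1)' * F.delta;      % sample times in seconds
    plot(t, F.trace)
    xlabel('Time [s]'), ylabel('Amplitude')
    title([F.kstnm ' ' F.kcmpnm])
end
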
@ -1,69 +0,0 @@
function [] = sac2ascii(sac, asciiFilePath)
%sac2ascii Saves the given SAC data in an ASCII file in SLIST format

    % check required fields
    checkValue(sac.knetwk, 'knetwk');
    checkValue(sac.kstnm, 'kstnm');
    checkValue(sac.kcmpnm, 'kcmpnm');
    checkValue(sac.delta, 'delta');
    checkValue(sac.nzyear, 'nzyear');
    checkValue(sac.nzjday, 'nzjday');
    checkValue(sac.nzhour, 'nzhour');
    checkValue(sac.nzmin, 'nzmin');
    checkValue(sac.sec, 'sec');

    networkCode = sac.knetwk;
    stationCode = sac.kstnm;
    locationCode = sac.khole;
    if isUndefined(locationCode); locationCode = ''; end
    channelCode = sac.kcmpnm;
    frequency = int32(1 / sac.delta);
    unit = extractUnit(sac);
    startTime = extractStartTime(sac);
    samples = length(sac.trace);

    fileID = fopen(asciiFilePath, 'w');
    fprintf(fileID, 'TIMESERIES %s_%s_%s_%s, %d samples, %d sps, %s, SLIST, FLOAT, %s\n',...
        networkCode, stationCode, locationCode, channelCode, samples, frequency, startTime, unit);
    fprintf(fileID, '%f\n', sac.trace);
    fclose(fileID);

end

function [unit] = extractUnit(sac)
    switch sac.idep
        case 6
            unit = 'NM';
        case 7
            unit = 'NM/S';
        case 8
            unit = 'NM/S^2';
        otherwise
            unit = 'COUNTS';
    end
end

function [startTime] = extractStartTime(sac)
    % converting day of the year to day of the month
    yearString = ['01-01-', num2str(sac.nzyear)];
    date = datenum(yearString, 'dd-mm-yyyy') + sac.nzjday - 1;
    dateString = datestr(date, 'yyyy-mm-dd');

    startTime = sprintf('%sT%02d:%02d:%09.6f', dateString, sac.nzhour, sac.nzmin, sac.sec);
end

function [] = checkValue(value, fieldName)
    if isUndefined(value)
        error('sac does not contain required field - [%s]', fieldName);
    end
end

function [undefined] = isUndefined(value)
    if isa(value, 'char')
        undefined = strcmp(value, '-12345');
    elseif isa(value, 'logical')
        undefined = value == false;
    else
        undefined = abs(-12345 - value) < 1e-10;
    end
end

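A hypothetical end-to-end conversion combining the two functions above; the paths are examples only.

% Read a SAC file and write it back out in SLIST ASCII format.
F = readsac('2004.02.23-17.31.00.ABH.SHZ.SAC');
if F.tau == 1
    sac2ascii(F, '2004.02.23-17.31.00.ABH.SHZ.ascii');
end
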