Skip to content

API Reference

This section contains the auto-generated documentation from the helper functions for LSA of the FMG-FMG and FMM-FMG models (lsa_api.md). Tasks ranging from generating the derivative-based normalized local sensitivity indices and loading parameter bounds to performing the Monte Carlo simulation for calculating the indices and visualizing them are executed separately by various functions.

FMG-FMG Model

scripts.LSA.fmg_lsa_utils

generate_derivatives()

Defines symbolic equations and returns lambdified derivative functions. Returns: funcs_dEp (dict): Dictionary of lambdified functions for derivatives of storage modulus. funcs_dEpp (dict): Dictionary of lambdified functions for derivatives of loss modulus.

Source code in scripts/LSA/fmg_lsa_utils.py
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
def generate_derivatives():
    """Build symbolic FMG-FMG moduli and return lambdified derivative functions.

    Returns:
        funcs_dEp (dict): Lambdified functions for the derivatives of the
            storage modulus, keyed by parameter name.
        funcs_dEpp (dict): Lambdified functions for the derivatives of the
            loss modulus, keyed by parameter name.
    """
    # Model parameters for the two fractional-Maxwell-gel branches, plus frequency
    E_c1, E_c2, tau_c1, tau_c2, alpha_1, alpha_2, w_freq = sp.symbols(
        'E_c1 E_c2 tau_c1 tau_c2 alpha_1 alpha_2 w_freq'
    )

    def _branch_moduli(E_c, tau_c, alpha):
        # Storage (Ep) and loss (Epp) contributions of a single FMG branch
        x = (w_freq * tau_c) ** alpha
        x2 = (w_freq * tau_c) ** (2 * alpha)
        denom = 1 + x * sp.cos(sp.pi * alpha / 2) + x2
        ep = E_c * (x * sp.cos(sp.pi * alpha / 2) + x2) / denom
        epp = E_c * x * sp.sin(sp.pi * alpha / 2) / denom
        return ep, epp

    Ep_1, Epp_1 = _branch_moduli(E_c1, tau_c1, alpha_1)
    Ep_2, Epp_2 = _branch_moduli(E_c2, tau_c2, alpha_2)

    Ep = Ep_1 + Ep_2
    Epp = Epp_1 + Epp_2

    # Each derivative is lambdified over the symbols of its own branch
    vars_branch_1 = (E_c1, tau_c1, alpha_1, w_freq)
    vars_branch_2 = (E_c2, tau_c2, alpha_2, w_freq)
    param_map = {
        'E_c1': (vars_branch_1, E_c1),
        'tau_c1': (vars_branch_1, tau_c1),
        'alpha_1': (vars_branch_1, alpha_1),
        'E_c2': (vars_branch_2, E_c2),
        'tau_c2': (vars_branch_2, tau_c2),
        'alpha_2': (vars_branch_2, alpha_2),
    }

    funcs_dEp = {name: sp.lambdify(v, sp.diff(Ep, sym), 'numpy')
                 for name, (v, sym) in param_map.items()}
    funcs_dEpp = {name: sp.lambdify(v, sp.diff(Epp, sym), 'numpy')
                  for name, (v, sym) in param_map.items()}

    print("Derivative functions generated successfully.")
    return funcs_dEp, funcs_dEpp

load_parameter_bounds(file_path, rows, cols, gnp_index, std_fctr=0.05)

Loads Excel data and calculates uniform distribution bounds. Args: file_path (dict): Dictionary containing paths to Excel files. rows (dict): A dictionary containing the start and end rows for reading the optimized parameters. cols (dict): A dictionary containing the column indices for reading the optimized parameters. gnp_index (int): Index for the GnP parameter set to extract means from. std_fctr (float): Standard deviation factor for calculating bounds. Returns: bounds (list): List of tuples containing (a, b) bounds for uniform distribution.

Source code in scripts/LSA/fmg_lsa_utils.py
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def load_parameter_bounds(file_path, rows, cols, gnp_index, std_fctr=0.05):
    """Load optimized parameters from Excel and derive uniform-distribution bounds.

    Args:
        file_path (dict): Dictionary containing paths to Excel files.
        rows (dict): Start and end rows for reading the optimized parameters.
        cols (dict): Column indices for reading the optimized parameters.
        gnp_index (int): Index of the GnP parameter set to extract means from.
        std_fctr (float): Standard deviation factor for calculating bounds.
    Returns:
        bounds (list): List of (a, b) tuples for the uniform distributions.
    """
    frame = pd.read_excel(
        file_path['opt_path'], usecols=cols['cols_opt'], skiprows=rows['start_row_opt'],
        nrows=rows['end_row_opt'] - rows['start_row_opt'], header=None
    )
    mus = frame.to_numpy()[gnp_index]

    def _interval(mu):
        # A Uniform(a, b) with mean mu and std sigma satisfies b - a = sqrt(12)*sigma
        sigma = std_fctr * mu
        hi = 0.5 * (np.sqrt(12) * sigma + 2 * mu)
        return (2 * mu - hi, hi)

    bounds = [_interval(mu) for mu in mus]

    print("Parameter bounds calculated successfully.")
    return bounds

run_monte_carlo(funcs_dEp, funcs_dEpp, bounds, params_list, w_freq, n_mc=10 ** 3, batch_size=50000)

Executes the MC simulation and returns result arrays. Args: funcs_dEp (dict): Dictionary of lambdified functions for derivatives of storage modulus. funcs_dEpp (dict): Dictionary of lambdified functions for derivatives of loss modulus. bounds (list): List of tuples containing lower and upper bounds for uniform distribution. w_freq (ndarray): Array of frequency values used in the simulation. n_mc (int): Number of Monte Carlo iterations. Returns: realized_indices (dict): Dictionary containing arrays of sensitivity indices for all moduli.

Source code in scripts/LSA/fmg_lsa_utils.py
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
def run_monte_carlo(funcs_dEp, funcs_dEpp, bounds, params_list, w_freq, n_mc=10**3, batch_size=50_000):
    """Execute the MC simulation and return the realized sensitivity indices.

    Args:
        funcs_dEp (dict): Lambdified derivative functions of the storage modulus,
            keyed by parameter name.
        funcs_dEpp (dict): Lambdified derivative functions of the loss modulus,
            keyed by parameter name.
        bounds (list): Six (lower, upper) tuples for the uniform distributions of
            (E_c1, tau_c1, alpha_1, E_c2, tau_c2, alpha_2), in that order.
        params_list (list): Parameter names, ordered to match ``bounds``.
        w_freq (ndarray): Frequency values used in the simulation.
        n_mc (int): Number of Monte Carlo iterations.
        batch_size (int): Number of samples evaluated per vectorized batch.

    Returns:
        realized_indices (dict): Per-modulus ('Ep', 'Epp', 'Ecomplex') dicts
            mapping each parameter to an (n_mc, len(w_freq)) array of
            normalized local sensitivity indices.

    Raises:
        ValueError: If ``bounds`` does not contain exactly six intervals.
    """
    if len(bounds) != 6:
        raise ValueError(f"Expected 6 parameter bounds, got {len(bounds)}")
    w_freq = np.asarray(w_freq)

    # Initialize arrays to store normalized sensitivity indices for each modulus,
    # parameter and frequency
    realized_indices = {
        'Ep': {p: np.zeros((n_mc, len(w_freq))) for p in params_list},
        'Epp': {p: np.zeros((n_mc, len(w_freq))) for p in params_list},
        'Ecomplex': {p: np.zeros((n_mc, len(w_freq))) for p in params_list},
    }

    # Process samples in batches to bound peak memory
    print(f"Starting Monte Carlo simulation with {n_mc} iterations...")
    for start_idx in range(0, n_mc, batch_size):
        end_idx = min(start_idx + batch_size, n_mc)
        b_size = end_idx - start_idx

        # Draw one column vector per parameter so the moduli below broadcast
        # to shape (b_size, len(w_freq))
        E_c1, tau_c1, alpha_1, E_c2, tau_c2, alpha_2 = (
            np.random.uniform(lo, hi, b_size).reshape(-1, 1) for lo, hi in bounds
        )

        # Calculate moduli for the current parameter realization
        numerator_Ep_1 = E_c1 * ( (w_freq*tau_c1)**alpha_1 * np.cos(np.pi*alpha_1/2) + (w_freq*tau_c1)**(2*alpha_1) )
        numerator_Ep_2 = E_c2 * ( (w_freq*tau_c2)**alpha_2 * np.cos(np.pi*alpha_2/2) + (w_freq*tau_c2)**(2*alpha_2) )

        numerator_Epp_1 = E_c1 * ( (w_freq*tau_c1)**alpha_1 * np.sin(np.pi*alpha_1/2) )
        numerator_Epp_2 = E_c2 * ( (w_freq*tau_c2)**alpha_2 * np.sin(np.pi*alpha_2/2) )

        denom_1 = (1 + (w_freq*tau_c1)**alpha_1 * np.cos(np.pi/2*alpha_1) + (w_freq*tau_c1)**(2*alpha_1))
        denom_2 = (1 + (w_freq*tau_c2)**alpha_2 * np.cos(np.pi/2*alpha_2) + (w_freq*tau_c2)**(2*alpha_2))

        Ep = numerator_Ep_1 / denom_1 + numerator_Ep_2 / denom_2
        Epp = numerator_Epp_1 / denom_1 + numerator_Epp_2 / denom_2
        Ecomplex = np.sqrt(Ep**2 + Epp**2)

        # Calculate normalized local sensitivity coefficients
        for p, sample in zip(params_list, [E_c1, tau_c1, alpha_1, E_c2, tau_c2, alpha_2]):
            # Branch-1 derivatives take branch-1 parameters, branch-2 likewise
            if p in ('E_c1', 'tau_c1', 'alpha_1'):
                args = (E_c1, tau_c1, alpha_1, w_freq)
            else:
                args = (E_c2, tau_c2, alpha_2, w_freq)

            dEp_val = funcs_dEp[p](*args)
            dEpp_val = funcs_dEpp[p](*args)

            realized_indices['Ep'][p][start_idx:end_idx, :] = dEp_val * sample / Ep
            realized_indices['Epp'][p][start_idx:end_idx, :] = dEpp_val * sample / Epp
            realized_indices['Ecomplex'][p][start_idx:end_idx, :] = (sample / Ecomplex) * np.sqrt(dEp_val**2 + dEpp_val**2)

        print(f"Completed {end_idx} / {n_mc} iterations")

    print("Monte Carlo simulation completed successfully.")
    return realized_indices

save_statistics_npz(realized_indices, params, w_freq, HS, GnP, file_path)

Calculates mean and standard deviation of sensitivity indices, and L1 norm of mean sensitivity indices, followed by saving to a compressed NumPy archive. Args: realized_indices (dict): Dictionary containing arrays of sensitivity indices for all moduli. params (list): List of parameter names. w_freq (ndarray): Array of frequency values used in the simulation. HS (int): Heat treatment condition. GnP (str): GnP loading condition. file_path (dict): Dictionary containing paths to save the data.

Source code in scripts/LSA/fmg_lsa_utils.py
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
def save_statistics_npz(realized_indices, params, w_freq, HS, GnP, file_path):
    """Compute mean/std of the sensitivity indices and the L1 norm of the mean
    index curves, then save everything to a compressed NumPy archive.

    The per-modulus array stacks [mean_0, std_0, mean_1, std_1, ...] row-wise
    and stores the frequency grid in the final row so downstream plotting code
    can recover it via ``mean_std_indices[-1, :]``.

    Args:
        realized_indices (dict): Arrays of sensitivity indices for all moduli.
        params (list): List of parameter names.
        w_freq (ndarray): Frequency values used in the simulation.
        HS (int): Heat treatment condition.
        GnP (str): GnP loading condition.
        file_path (dict): Paths; 'save_path' is the output directory.
    """
    w_freq = np.asarray(w_freq, dtype=float)
    log_w_freq = np.log(w_freq)
    # np.trapz was removed in NumPy 2.0 in favor of np.trapezoid; support both.
    _trapezoid = np.trapezoid if hasattr(np, 'trapezoid') else np.trapz
    save_data = {}
    for key in ['Ep', 'Epp', 'Ecomplex']:
        mean_std_indices = np.zeros((len(params)*2 + 1, len(w_freq)))
        l1_array = np.zeros(len(params))

        for idx, p in enumerate(params):
            mean_sensitivity_index = np.mean(realized_indices[key][p], axis=0)
            std_sensitivity_index = np.std(realized_indices[key][p], axis=0, ddof=1)

            mean_std_indices[idx*2, :] = mean_sensitivity_index
            mean_std_indices[idx*2 + 1, :] = std_sensitivity_index

            # L1 norm of the mean curve, integrated over log-frequency
            l1_array[idx] = _trapezoid(np.abs(mean_sensitivity_index), x=log_w_freq)

        # BUGFIX: the final row was allocated but never populated, so the
        # plotting utilities (which read w_freq from the last row) got zeros.
        mean_std_indices[-1, :] = w_freq

        save_data[f'all_lsi_{key}_{HS}HS_{GnP}'] = mean_std_indices
        save_data[f'L1_lsi_{key}_{HS}HS_{GnP}'] = l1_array

    np.savez_compressed(file_path['save_path'] + f'/{HS}HS_{GnP}.npz', **save_data)
    print(f"Statistics saved successfully to {file_path['save_path']}/{HS}HS_{GnP}.npz")

scripts.LSA.fmg_lsa_vis_utils

plot_local_sensitivity_indices(E_type, HS, GnP_list, file_path)

Plots the local sensitivity indices for the specified modulus type. Args: E_type (str): Type of modulus ('Ep', 'Epp', or 'Ecomplex'). HS (int): Heat treatment condition. GnP_list (list): List of GnP loading conditions. file_path (dict): Dictionary containing paths to save the plot and load data.

Source code in scripts/LSA/fmg_lsa_vis_utils.py
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
def plot_local_sensitivity_indices(E_type, HS, GnP_list, file_path):
    """Plot the mean local sensitivity indices for one modulus type.

    Args:
        E_type (str): Modulus type ('Ep', 'Epp', or 'Ecomplex').
        HS (int): Heat treatment condition.
        GnP_list (list): GnP loading conditions to overlay.
        file_path (dict): Paths used to load the data and save the figure.
    """
    _, axes = plt.subplots(3, 2, figsize=(10, 5), dpi=600)
    axes = axes.flatten()
    # Branch-1 parameters fill the left column, branch-2 the right column
    panel_order = [0, 2, 4, 1, 3, 5]

    modulus_symbols = {'Ep': r'E^{\prime}', 'Epp': r'E^{\prime\prime}', 'Ecomplex': r'E^*'}
    mod_sym = modulus_symbols.get(E_type, r'E')

    param_symbols = [r'E_{c_1}', r'\tau_{c_1}', r'\alpha_1', 
                     r'E_{c_2}', r'\tau_{c_2}', r'\alpha_2']
    ylabels = [fr'$\bar{{S}}_{{{mod_sym}, {p}}}$' for p in param_symbols]

    for GnP in GnP_list:
        archive = np.load(f'{file_path["save_path"]}/{HS}HS_{GnP}.npz')
        mean_std = archive[f'all_lsi_{E_type}_{HS}HS_{GnP}']
        # The frequency grid is stored in the final row of the stacked array
        w_freq = mean_std[-1, :]

        for idx, ylab in enumerate(ylabels):
            ax = axes[panel_order[idx]]
            # Mean curves sit in the even rows; odd rows hold the std deviations
            ax.plot(w_freq, mean_std[idx * 2, :], label=f'{GnP}', linestyle='-', linewidth=1.5)
            ax.set_xscale('log')
            ax.set_ylabel(ylab)
            ax.set_xlim([0.5 * w_freq[0], 1.5 * w_freq[-1]])
            ax.set_xticks([1e-8, 1e-6, 1e-4, 1e-2, 1e0, 1e2])

    for ax in (axes[-2], axes[-1]):
        ax.set_xlabel(r'$\omega a_{T} \ (rad/s)$')
    axes[0].legend(GnP_list, loc='best')

    plt.tight_layout()

    plt.savefig(f'{file_path["save_path"]}/LSI_{E_type}_{HS}HS.png', dpi=600, bbox_inches='tight')
    plt.show()

read_excel_range(filename, cell_range)

Read a specified range of cells from an Excel file and return it as a NumPy array.

Parameters:

Name Type Description Default
filename str

Path to the Excel file.

required
cell_range str

Range of cells to read.

required

Returns:

Type Description

np.ndarray: A NumPy array containing the values from the specified cell range.

Source code in scripts/LSA/fmg_lsa_vis_utils.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def read_excel_range(filename, cell_range):
    """Read a rectangular cell range from "Sheet1" of an Excel workbook.

    Args:
        filename (str): Path to the Excel file.
        cell_range (str): Cell range in A1 notation (e.g. "B2:D10").

    Returns:
        np.ndarray: Float array of the cell values; empty cells become NaN.

    Raises:
        KeyError: If the workbook has no sheet named "Sheet1".
        ValueError: If a cell value cannot be converted to float.
    """
    wb = load_workbook(filename=filename, data_only=True, read_only=True)
    try:
        ws = wb["Sheet1"]

        min_col, min_row, max_col, max_row = range_boundaries(cell_range)

        def _as_float(value):
            # Empty cells are represented as None by openpyxl
            return np.nan if value is None else float(value)

        data = [
            [_as_float(ws.cell(row=r, column=c).value)
             for c in range(min_col, max_col + 1)]
            for r in range(min_row, max_row + 1)
        ]
    finally:
        # BUGFIX: previously the workbook was only closed on success, leaking
        # the file handle if a cell failed float conversion.
        wb.close()
    return np.array(data, dtype=float)

plot_L1_grouped(mean_S, std_S, ylabel, params_list=['$E_{c_1}$', '$\\tau_{c_1}$', '$\\alpha_1$', '$\\beta_1$', '$E_{c_2}$', '$\\tau_{c_2}$', '$\\alpha_2$'], legend_labels=('20% HSWF', '30% HSWF', '40% HSWF'), figsize=(6, 4), save_path=None)

Plot the L1 norm of the sensitivity indices for each parameter, grouped by GnP content.

Parameters:

Name Type Description Default
mean_S ndarray

Array of mean sensitivity indices.

required
std_S ndarray

Array of standard deviations of sensitivity indices.

required
ylabel str

Label for the y-axis.

required
params_list list

List of parameter names.

['$E_{c_1}$', '$\\tau_{c_1}$', '$\\alpha_1$', '$\\beta_1$', '$E_{c_2}$', '$\\tau_{c_2}$', '$\\alpha_2$']
legend_labels tuple

Labels for the legend.

('20% HSWF', '30% HSWF', '40% HSWF')
figsize tuple

Figure size.

(6, 4)
save_path str

Path to save the plot. Defaults to None.

None
Source code in scripts/LSA/fmg_lsa_vis_utils.py
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
def plot_L1_grouped(mean_S,
                    std_S,
                    ylabel,
                    params_list=(r"$E_{c_1}$", r"$\tau_{c_1}$", r"$\alpha_1$", r"$\beta_1$", r"$E_{c_2}$", r"$\tau_{c_2}$", r"$\alpha_2$"),
                    legend_labels=("20% HSWF", "30% HSWF", "40% HSWF"),
                    figsize=(6, 4),
                    save_path=None):
    """Plot the L1 norm of the sensitivity indices for each parameter as
    grouped bars (one group per parameter, one bar per HSWF content), sorted
    by descending mean value across groups.

    Args:
        mean_S (numpy.ndarray): Mean sensitivity indices, shape (n_conditions, n_params).
        std_S (numpy.ndarray): Standard deviations, same shape as mean_S.
        ylabel (str): Label for the y-axis.
        params_list (sequence): Parameter display names used as x tick labels.
        legend_labels (tuple): One legend entry per condition.
        figsize (tuple): Figure size in inches.
        save_path (str, optional): Base path (without extension) to save the
            figure as PNG. If None, the figure is shown but not saved.
    """
    y = mean_S.T
    err = std_S.T

    # Sort parameter groups by descending mean across conditions
    sort_order = np.argsort(np.mean(y, axis=1))[::-1]
    sortedY = y[sort_order, :]
    sortedErr = err[sort_order, :]
    sortedLabels = [params_list[i] for i in sort_order]

    ngroups, nbars = sortedY.shape

    _ = plt.figure(figsize=figsize)
    ax = plt.gca()

    # Center the bars of each group around its integer x position
    group_x = np.arange(ngroups)
    total_group_width = 0.8
    bar_w = total_group_width / nbars
    offsets = (np.arange(nbars) - (nbars - 1) / 2.0) * bar_w

    for i in range(nbars):
        x_i = group_x + offsets[i]
        ax.bar(x_i, sortedY[:, i], width=bar_w, label=legend_labels[i])

        ax.errorbar(
            x_i,
            sortedY[:, i],
            yerr=sortedErr[:, i],
            fmt="none",
            ecolor="k",
            capsize=0
        )

    ax.set_ylabel(ylabel)
    ax.set_xlabel(r"Model Parameters")

    ax.set_xticks(group_x)
    ax.set_xticklabels(sortedLabels)
    ax.legend(frameon=False)

    plt.tight_layout()
    # BUGFIX: savefig previously ran unconditionally, writing "None.png"
    # when no save path was supplied.
    if save_path is not None:
        plt.savefig(f"{save_path}.png", dpi=300)
    plt.show()

FMM-FMG Model

scripts.LSA.fmm_lsa_utils

generate_derivatives()

Defines symbolic equations and returns lambdified derivative functions. Returns: funcs_dEp (dict): Dictionary of lambdified functions for derivatives of storage modulus. funcs_dEpp (dict): Dictionary of lambdified functions for derivatives of loss modulus.

Source code in scripts/LSA/fmm_lsa_utils.py
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
def generate_derivatives():
    """Build symbolic FMM-FMG moduli and return lambdified derivative functions.

    Returns:
        funcs_dEp (dict): Lambdified functions for the derivatives of the
            storage modulus, keyed by parameter name.
        funcs_dEpp (dict): Lambdified functions for the derivatives of the
            loss modulus, keyed by parameter name.
    """
    # Branch 1 is a fractional Maxwell model (alpha_1, beta_1);
    # branch 2 is a fractional Maxwell gel (alpha_2 only).
    E_c1, E_c2, tau_c1, tau_c2, alpha_1, alpha_2, beta_1, w_freq = sp.symbols(
        'E_c1 E_c2 tau_c1 tau_c2 alpha_1 alpha_2 beta_1 w_freq'
    )

    x1 = w_freq * tau_c1
    x2 = w_freq * tau_c2

    # FMM branch
    den_1 = 1 + x1**(alpha_1 - beta_1) * sp.cos(sp.pi * (alpha_1 - beta_1) / 2) + x1**(2 * (alpha_1 - beta_1))
    Ep_1 = E_c1 * (x1**alpha_1 * sp.cos(sp.pi * alpha_1 / 2)
                   + x1**(2 * alpha_1 - beta_1) * sp.cos(sp.pi * beta_1 / 2)) / den_1
    Epp_1 = E_c1 * (x1**alpha_1 * sp.sin(sp.pi * alpha_1 / 2)
                    + x1**(2 * alpha_1 - beta_1) * sp.sin(sp.pi * beta_1 / 2)) / den_1

    # FMG branch
    den_2 = 1 + x2**alpha_2 * sp.cos(sp.pi * alpha_2 / 2) + x2**(2 * alpha_2)
    Ep_2 = E_c2 * (x2**alpha_2 * sp.cos(sp.pi * alpha_2 / 2) + x2**(2 * alpha_2)) / den_2
    Epp_2 = E_c2 * x2**alpha_2 * sp.sin(sp.pi * alpha_2 / 2) / den_2

    Ep = Ep_1 + Ep_2
    Epp = Epp_1 + Epp_2

    # Each derivative is lambdified over the symbols of its own branch
    vars_branch_1 = (E_c1, tau_c1, alpha_1, beta_1, w_freq)
    vars_branch_2 = (E_c2, tau_c2, alpha_2, w_freq)
    param_map = {
        'E_c1': (vars_branch_1, E_c1),
        'tau_c1': (vars_branch_1, tau_c1),
        'alpha_1': (vars_branch_1, alpha_1),
        'beta_1': (vars_branch_1, beta_1),
        'E_c2': (vars_branch_2, E_c2),
        'tau_c2': (vars_branch_2, tau_c2),
        'alpha_2': (vars_branch_2, alpha_2),
    }

    funcs_dEp = {name: sp.lambdify(v, sp.diff(Ep, sym), 'numpy')
                 for name, (v, sym) in param_map.items()}
    funcs_dEpp = {name: sp.lambdify(v, sp.diff(Epp, sym), 'numpy')
                  for name, (v, sym) in param_map.items()}

    print("Derivative functions generated successfully.")
    return funcs_dEp, funcs_dEpp

load_parameter_bounds(file_path, rows, cols, gnp_index, std_fctr=0.05)

Loads Excel data and calculates uniform distribution bounds. Args: file_path (dict): Dictionary containing paths to Excel files. rows (dict): A dictionary containing the start and end rows for reading the optimized parameters. cols (dict): A dictionary containing the column indices for reading the optimized parameters. gnp_index (int): Index for the GnP parameter set to extract means from. std_fctr (float): Standard deviation factor for calculating bounds. Returns: bounds (list): List of tuples containing (a, b) bounds for uniform distribution.

Source code in scripts/LSA/fmm_lsa_utils.py
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
def load_parameter_bounds(file_path, rows, cols, gnp_index, std_fctr=0.05):
    """Read the optimized FMM-FMG parameters from Excel and build uniform bounds.

    Args:
        file_path (dict): Dictionary containing paths to Excel files.
        rows (dict): Start and end rows for reading the optimized parameters.
        cols (dict): Column indices for reading the optimized parameters.
        gnp_index (int): Index of the GnP parameter set to extract means from.
        std_fctr (float): Standard deviation factor for calculating bounds.
    Returns:
        bounds (list): List of (a, b) tuples for the uniform distributions.
    """
    n_rows = rows['end_row_opt'] - rows['start_row_opt']
    table = pd.read_excel(
        file_path['opt_path'], usecols=cols['cols_opt'], skiprows=rows['start_row_opt'],
        nrows=n_rows, header=None
    ).to_numpy()

    bounds = []
    for mu in table[gnp_index]:
        # Uniform(a, b) with mean mu and std sigma has width b - a = sqrt(12)*sigma
        sigma = std_fctr * mu
        upper_bound = 0.5 * (np.sqrt(12) * sigma + 2 * mu)
        bounds.append((2 * mu - upper_bound, upper_bound))

    print("Parameter bounds calculated successfully.")
    return bounds

run_monte_carlo(funcs_dEp, funcs_dEpp, bounds, params_list, w_freq, n_mc=10 ** 3, batch_size=50000)

Executes the MC simulation and returns result arrays. Args: funcs_dEp (dict): Dictionary of lambdified functions for derivatives of storage modulus. funcs_dEpp (dict): Dictionary of lambdified functions for derivatives of loss modulus. bounds (list): List of tuples containing lower and upper bounds for uniform distribution. w_freq (ndarray): Array of frequency values used in the simulation. n_mc (int): Number of Monte Carlo iterations. Returns: realized_indices (dict): Dictionary containing arrays of sensitivity indices for all moduli.

Source code in scripts/LSA/fmm_lsa_utils.py
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
def run_monte_carlo(funcs_dEp, funcs_dEpp, bounds, params_list, w_freq, n_mc=10**3, batch_size=50_000):
    """Execute the MC simulation and return the realized sensitivity indices.

    Args:
        funcs_dEp (dict): Lambdified derivative functions of the storage modulus,
            keyed by parameter name.
        funcs_dEpp (dict): Lambdified derivative functions of the loss modulus,
            keyed by parameter name.
        bounds (list): Seven (lower, upper) tuples for the uniform distributions
            of (E_c1, tau_c1, alpha_1, beta_1, E_c2, tau_c2, alpha_2), in order.
        params_list (list): Parameter names, ordered to match ``bounds``.
        w_freq (ndarray): Frequency values used in the simulation.
        n_mc (int): Number of Monte Carlo iterations.
        batch_size (int): Number of samples evaluated per vectorized batch.

    Returns:
        realized_indices (dict): Per-modulus ('Ep', 'Epp', 'Ecomplex') dicts
            mapping each parameter to an (n_mc, len(w_freq)) array of
            normalized local sensitivity indices.

    Raises:
        ValueError: If ``bounds`` does not contain exactly seven intervals.
    """
    if len(bounds) != 7:
        raise ValueError(f"Expected 7 parameter bounds, got {len(bounds)}")
    w_freq = np.asarray(w_freq)

    # Initialize arrays to store normalized sensitivity indices for each modulus,
    # parameter and frequency
    realized_indices = {
        'Ep': {p: np.zeros((n_mc, len(w_freq))) for p in params_list},
        'Epp': {p: np.zeros((n_mc, len(w_freq))) for p in params_list},
        'Ecomplex': {p: np.zeros((n_mc, len(w_freq))) for p in params_list},
    }

    # Process samples in batches to bound peak memory
    print(f"Starting Monte Carlo simulation with {n_mc} iterations...")
    for start_idx in range(0, n_mc, batch_size):
        end_idx = min(start_idx + batch_size, n_mc)
        b_size = end_idx - start_idx

        # Draw one column vector per parameter so the moduli below broadcast
        # to shape (b_size, len(w_freq))
        E_c1, tau_c1, alpha_1, beta_1, E_c2, tau_c2, alpha_2 = (
            np.random.uniform(lo, hi, b_size).reshape(-1, 1) for lo, hi in bounds
        )

        # Calculate moduli for the current parameter realization
        numerator_Ep_1 = E_c1 * ( (w_freq*tau_c1)**alpha_1 * np.cos(np.pi*alpha_1/2) + (w_freq*tau_c1)**(2*alpha_1 - beta_1) * np.cos(np.pi*beta_1/2) )
        numerator_Ep_2 = E_c2 * ( (w_freq*tau_c2)**alpha_2 * np.cos(np.pi*alpha_2/2) + (w_freq*tau_c2)**(2*alpha_2) )

        numerator_Epp_1 = E_c1 * ( (w_freq*tau_c1)**alpha_1 * np.sin(np.pi*alpha_1/2) + (w_freq * tau_c1)**(2*alpha_1 - beta_1) * np.sin(np.pi*beta_1/2) )
        numerator_Epp_2 = E_c2 * ( (w_freq*tau_c2)**alpha_2 * np.sin(np.pi*alpha_2/2) )

        denominator_1 = ( 1 + (w_freq*tau_c1)**(alpha_1-beta_1) * np.cos(np.pi*(alpha_1 - beta_1)/2) + (w_freq*tau_c1)**(2*(alpha_1 - beta_1)) )
        denominator_2 = ( 1 + (w_freq*tau_c2)**alpha_2 * np.cos(np.pi*alpha_2/2) + (w_freq*tau_c2)**(2*alpha_2) )

        Ep = numerator_Ep_1 / denominator_1 + numerator_Ep_2 / denominator_2
        Epp = numerator_Epp_1 / denominator_1 + numerator_Epp_2 / denominator_2
        Ecomplex = np.sqrt(Ep**2 + Epp**2)

        # Calculate normalized local sensitivity indices
        for p, sample in zip(params_list, [E_c1, tau_c1, alpha_1, beta_1, E_c2, tau_c2, alpha_2]):
            # Branch-1 (FMM) derivatives take branch-1 parameters, branch-2 (FMG) likewise
            if p in ('E_c1', 'tau_c1', 'alpha_1', 'beta_1'):
                args = (E_c1, tau_c1, alpha_1, beta_1, w_freq)
            else:
                args = (E_c2, tau_c2, alpha_2, w_freq)

            dEp_val = funcs_dEp[p](*args)
            dEpp_val = funcs_dEpp[p](*args)

            realized_indices['Ep'][p][start_idx:end_idx, :] = dEp_val * sample / Ep
            realized_indices['Epp'][p][start_idx:end_idx, :] = dEpp_val * sample / Epp
            realized_indices['Ecomplex'][p][start_idx:end_idx, :] = (sample / Ecomplex) * np.sqrt(dEp_val**2 + dEpp_val**2)

        print(f"Completed {end_idx} / {n_mc} iterations")

    print("Monte Carlo simulation completed successfully.")
    return realized_indices

save_statistics_npz(realized_indices, params, w_freq, HS, GnP, file_path)

Calculates mean and standard deviation of sensitivity indices, and L1 norm of mean sensitivity indices, followed by saving to a compressed NumPy archive. Args: realized_indices (dict): Dictionary containing arrays of sensitivity indices for all moduli. params (list): List of parameter names. w_freq (ndarray): Array of frequency values used in the simulation. HS (int): Heat treatment condition. GnP (str): GnP loading condition. file_path (dict): Dictionary containing paths to save the data.

Source code in scripts/LSA/fmm_lsa_utils.py
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
def save_statistics_npz(realized_indices, params, w_freq, HS, GnP, file_path):
    """Calculates mean and standard deviation of sensitivity indices, and L1 norm of mean sensitivity indices, followed by saving to a compressed NumPy archive.
    Args:
        realized_indices (dict): Dictionary containing arrays of sensitivity indices for all moduli.
        params (list): List of parameter names.
        w_freq (ndarray): Array of frequency values used in the simulation.
        HS (int): Heat treatment condition.
        GnP (str): GnP loading condition.
        file_path (dict): Dictionary containing paths to save the data."""
    log_w_freq = np.log(w_freq)
    save_data = {}
    for key in ['Ep', 'Epp', 'Ecomplex']:
        mean_std_indices = np.zeros((len(params)*2 + 1, len(w_freq)))
        l1_array = np.zeros(len(params))

        for idx, p in enumerate(params):
            mean_sensitivity_index = np.mean(realized_indices[key][p], axis=0)
            std_sensitivity_index = np.std(realized_indices[key][p], axis=0, ddof=1)

            mean_std_indices[idx*2, :] = mean_sensitivity_index
            mean_std_indices[idx*2 + 1, :] = std_sensitivity_index

            l1_array[idx] = np.trapezoid(np.abs(mean_sensitivity_index), x=log_w_freq)

        save_data[f'all_lsi_{key}_{HS}HS_{GnP}'] = mean_std_indices
        save_data[f'L1_lsi_{key}_{HS}HS_{GnP}'] = l1_array

    np.savez_compressed(file_path['save_path'] + f'/{HS}HS_{GnP}.npz', **save_data)
    print(f"Statistics saved successfully to {file_path['save_path']}/{HS}HS_{GnP}.npz")

scripts.LSA.fmm_lsa_vis_utils

plot_local_sensitivity_indices(E_type, HS, GnP_list, file_path)

Plots the local sensitivity indices for the specified modulus type. Args: E_type (str): Type of modulus ('Ep', 'Epp', or 'Ecomplex'). HS (int): Heat treatment condition. GnP_list (list): List of GnP loading conditions. file_path (dict): Dictionary containing paths to save the plot and load data.

Source code in scripts/LSA/fmm_lsa_vis_utils.py
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
def plot_local_sensitivity_indices(E_type, HS, GnP_list, file_path):
    """Plots the local sensitivity indices for the specified modulus type.

    Loads the statistics archives produced per GnP loading, then draws the
    mean sensitivity index of each model parameter in its own subplot,
    overlaying one curve per GnP loading.

    Args:
        E_type (str): Type of modulus ('Ep', 'Epp', or 'Ecomplex').
        HS (int): Heat treatment condition.
        GnP_list (list): List of GnP loading conditions.
        file_path (dict): Dictionary containing paths to save the plot and load data."""
    fig, axs = plt.subplots(4, 2, figsize=(10, 6.67), dpi=600)
    axs = axs.flatten()
    # Column-major placement: the first four parameters fill the left column,
    # the remaining three the right; the final axis is unused and removed.
    subplot_idx = [0, 2, 4, 6, 1, 3, 5]

    sym_map = {'Ep': r'E^{\prime}', 'Epp': r'E^{\prime\prime}', 'Ecomplex': r'E^*'}
    mod_sym = sym_map.get(E_type, r'E')

    param_symbols = [r'E_{c_1}', r'\tau_{c_1}', r'\alpha_1', r'\beta_1',
                    r'E_{c_2}', r'\tau_{c_2}', r'\alpha_2']

    ylabels = [fr'$\bar{{S}}_{{{mod_sym}, {p}}}$' for p in param_symbols]

    for GnP in GnP_list:
        file_name = f'{file_path["save_path"]}/{HS}HS_{GnP}.npz'
        array_key = f'all_lsi_{E_type}_{HS}HS_{GnP}'
        data = np.load(file_name)
        mean_std_indices = data[array_key]
        # Last row of the archive holds the frequency grid.
        w_freq = mean_std_indices[-1, :]

        for i in range(len(param_symbols)):
            ax = axs[subplot_idx[i]]

            # Mean rows sit at even indices; odd indices hold the std rows.
            mean_val = mean_std_indices[i * 2, :]

            ax.plot(w_freq, mean_val, label=f'{GnP}', linestyle='-', linewidth=1.5)

            ax.set_xscale('log')
            ax.set_ylabel(ylabels[i])
            ax.set_xlim([0.5 * w_freq[0], 1.5 * w_freq[-1]])
            ax.set_xticks([1e-8, 1e-6, 1e-4, 1e-2, 1e0, 1e2])

    axs[-2].set_xlabel(r'$\omega a_{T} \ (rad/s)$')
    axs[-3].set_xlabel(r'$\omega a_{T} \ (rad/s)$')
    fig.delaxes(axs[-1])
    # Bug fix: the legend was previously created twice on axs[0] (once with
    # positional labels, then overridden); one call using the per-line labels
    # reproduces the final result.
    axs[0].legend(loc='best')
    plt.tight_layout()

    save_file = f'{file_path["save_path"]}/LSI_{E_type}_{HS}HS.png'
    plt.savefig(save_file, dpi=600, bbox_inches='tight')
    plt.show()

read_excel_range(filename, cell_range)

Read a specified range of cells from an Excel file and return it as a NumPy array.

Parameters:

Name Type Description Default
filename str

Path to the Excel file.

required
cell_range str

Range of cells to read.

required

Returns:

Type Description

np.ndarray: A NumPy array containing the values from the specified cell range.

Source code in scripts/LSA/fmm_lsa_vis_utils.py
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
def read_excel_range(filename, cell_range):
    """Read a specified range of cells from an Excel file and return it as a NumPy array.

    Args:
        filename (str): Path to the Excel file.
        cell_range (str): Range of cells to read.

    Returns:
        np.ndarray: A NumPy array containing the values from the specified cell range.
    """
    workbook = load_workbook(filename=filename, data_only=True, read_only=True)
    sheet = workbook["Sheet1"]

    first_col, first_row, last_col, last_row = range_boundaries(cell_range)

    # Empty cells map to NaN; everything else is coerced to float.
    grid = [
        [np.nan if (val := sheet.cell(row=r, column=c).value) is None else float(val)
         for c in range(first_col, last_col + 1)]
        for r in range(first_row, last_row + 1)
    ]

    workbook.close()
    return np.array(grid, dtype=float)

plot_L1_grouped(mean_S, std_S, ylabel, params_list=['$E_{c_1}$', '$\\tau_{c_1}$', '$\\alpha_1$', '$\\beta_1$', '$E_{c_2}$', '$\\tau_{c_2}$', '$\\alpha_2$'], legend_labels=('20% HSWF', '30% HSWF', '40% HSWF'), figsize=(6, 4), save_path=None)

Plot the L1 norm of the sensitivity indices for each parameter, grouped by GnP content.

Parameters:

Name Type Description Default
mean_S ndarray

Array of mean sensitivity indices.

required
std_S ndarray

Array of standard deviations of sensitivity indices.

required
ylabel str

Label for the y-axis.

required
params_list list

List of parameter names.

['$E_{c_1}$', '$\\tau_{c_1}$', '$\\alpha_1$', '$\\beta_1$', '$E_{c_2}$', '$\\tau_{c_2}$', '$\\alpha_2$']
legend_labels tuple

Labels for the legend.

('20% HSWF', '30% HSWF', '40% HSWF')
figsize tuple

Figure size.

(6, 4)
save_path str

Path to save the plot. Defaults to None.

None
Source code in scripts/LSA/fmm_lsa_vis_utils.py
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
def plot_L1_grouped(mean_S,
                    std_S,
                    ylabel,
                    params_list=[r"$E_{c_1}$", r"$\tau_{c_1}$", r"$\alpha_1$", r"$\beta_1$", r"$E_{c_2}$", r"$\tau_{c_2}$", r"$\alpha_2$"],
                    legend_labels=("20% HSWF", "30% HSWF", "40% HSWF"),
                    figsize=(6, 4),
                    save_path=None):
    """Plot the L1 norm of the sensitivity indices for each parameter, grouped by GnP content.

    Parameters are sorted in descending order of their mean sensitivity across
    groups; each parameter becomes a cluster of bars (one per GnP content)
    with error bars drawn from ``std_S``.

    Args:
        mean_S (numpy.ndarray): Array of mean sensitivity indices, shape (n_contents, n_params).
        std_S (numpy.ndarray): Array of standard deviations of sensitivity indices, same shape.
        ylabel (str): Label for the y-axis.
        params_list (list): List of parameter names.
        legend_labels (tuple): Labels for the legend.
        figsize (tuple): Figure size.
        save_path (str, optional): Path (without extension) to save the plot.
            Defaults to None, in which case the figure is shown but not saved.
    """
    # Transpose so rows index parameters (bar clusters) and columns index GnP contents.
    y = mean_S.T
    err = std_S.T

    # Order clusters from most to least sensitive on average.
    sort_order = np.argsort(np.mean(y, axis=1))[::-1]
    sortedY = y[sort_order, :]
    sortedErr = err[sort_order, :]
    sortedLabels = [params_list[i] for i in sort_order]

    ngroups, nbars = sortedY.shape

    _ = plt.figure(figsize=figsize)
    ax = plt.gca()

    # Center each cluster of nbars bars around its integer group position.
    group_x = np.arange(ngroups)
    total_group_width = 0.8
    bar_w = total_group_width / nbars
    offsets = (np.arange(nbars) - (nbars - 1) / 2.0) * bar_w

    bars = []
    for i in range(nbars):
        x_i = group_x + offsets[i]
        b = ax.bar(x_i, sortedY[:, i], width=bar_w, label=legend_labels[i])
        bars.append(b)

        ax.errorbar(
            x_i,
            sortedY[:, i],
            yerr=sortedErr[:, i],
            fmt="none",
            ecolor="k",
            capsize=0
        )

    ax.set_ylabel(ylabel)
    ax.set_xlabel(r"Model Parameters")

    ax.set_xticks(group_x)
    ax.set_xticklabels(sortedLabels)
    ax.legend(frameon=False)

    plt.tight_layout()
    # Bug fix: the figure was saved unconditionally, writing a literal
    # "None.png" when save_path was left at its default of None.
    if save_path is not None:
        plt.savefig(f"{save_path}.png", dpi=300)
    plt.show()