I need consistent results for MSE between runs when training a Neural Network


Question

    import pandas as pd
    import numpy as np
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import mean_squared_error
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense
    from tensorflow.keras.optimizers import Adam
    import matplotlib.pyplot as plt
    from bayes_opt import BayesianOptimization
    
    # Load data from the UCI Machine Learning Repository
    
    df = pd.read_csv(r'C:\Test_set_Yacht.csv')
    df1 = pd.read_csv(r'C:\Train_set_Yacht.csv')
    df2 = pd.read_csv(r'C:\Yacht_hydro.csv')
    
    X = df2.drop("residuary_resistance", axis=1)
    Y = df2["residuary_resistance"]
    
    # Split the data into features and target
    X_train = df1.drop("residuary_resistance", axis=1)
    y_train = df1["residuary_resistance"]
    
    # Split the data into train and test sets
    X_test = df.drop("residuary_resistance", axis=1)
    y_test = df["residuary_resistance"]
    
    # Scale the data using StandardScaler
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    
    
    def objective_model_1(hidden_units, learning_rate):
        model = Sequential()
        model.add(Dense(hidden_units, input_dim=X.shape[1], activation="relu"))
        model.add(Dense(hidden_units, activation="relu"))
        model.add(Dense(1, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(learning_rate=learning_rate))
        model.fit(X_train_scaled, y_train, epochs=100, verbose=0)
        y_pred = model.predict(X_test_scaled)
        return -mean_squared_error(y_test, y_pred)
    
    pbounds_model_1 = {
        "hidden_units": (32, 128),
        "learning_rate": (1e-5, 1e-1),
    }
    
    bo_model_1 = BayesianOptimization(
        f=objective_model_1,
        pbounds=pbounds_model_1,
        random_state=42,
    )
    
    bo_model_1.maximize(init_points=10, n_iter=90)
    
    
    def objective_model_2(hidden_units, learning_rate):
        model = Sequential()
        model.add(Dense(hidden_units, input_shape=X_train_scaled.shape[1:], activation="relu"))
        model.add(Dense(hidden_units, activation="relu"))
        model.add(Dense(hidden_units, activation="relu"))
        model.add(Dense(hidden_units, activation="relu"))
        model.add(Dense(1, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(learning_rate=learning_rate))
        model.fit(X_train_scaled, y_train, epochs=100, verbose=0)
        y_pred = model.predict(X_test_scaled)
        return -mean_squared_error(y_test, y_pred)
    
    pbounds_model_2 = {
        "hidden_units": (32, 128),
        "learning_rate": (1e-5, 1e-1),
    }
    
    bo_model_2 = BayesianOptimization(
        f=objective_model_2,
        pbounds=pbounds_model_2,
        random_state=42,
    )
    
    bo_model_2.maximize(init_points=10, n_iter=90)
    
    # Get the best hyperparameters for each model
    best_params_model_1 = bo_model_1.max["params"]
    best_params_model_2 = bo_model_2.max["params"]
    
    # Train and evaluate model 1 with the best hyperparameters
    model_1 = Sequential()
    model_1.add(Dense(32, input_dim=X.shape[1], activation="relu"))
    model_1.add(Dense(32, activation="relu"))
    model_1.add(Dense(1, activation="linear"))
    model_1.compile(loss="mse", optimizer=Adam(learning_rate=best_params_model_1["learning_rate"]))
    model_1.fit(X_train_scaled, y_train, epochs=100, verbose=0)
    y_pred_1 = model_1.predict(X_test_scaled)
    mse_1 = mean_squared_error(y_test, y_pred_1)
    print("模型 1 在测试集上的均方误差:", mse_1)
    
    # Train and evaluate model 2 with the best hyperparameters
    model_2 = Sequential()
    model_2.add(Dense(64, input_dim=X.shape[1], activation="relu"))
    model_2.add(Dense(64, activation="relu"))
    model_2.add(Dense(64, activation="relu"))
    model_2.add(Dense(64, activation="relu"))
    model_2.add(Dense(1, activation="linear"))
    model_2.compile(loss="mse", optimizer=Adam(learning_rate=best_params_model_2["learning_rate"]))
    model_2.fit(X_train_scaled, y_train, epochs=100, verbose=0)
    y_pred_2 = model_2.predict(X_test_scaled)
    mse_2 = mean_squared_error(y_test, y_pred_2)
    print("模型 2 在测试集上的均方误差:", mse_2)

In the code above, I implement Bayesian optimization for hyperparameter tuning of two different neural networks, using the dataset from https://archive.ics.uci.edu/ml/datasets/yacht+hydrodynamics. After running the optimization, I rebuild those two networks in a Jupyter Notebook code cell and train them with the best hyperparameters already found by the Bayesian optimizer. I need to get the same MSE each time I run the code; this is why I split the data in advance, to ensure the same results.
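One detail that matters when comparing runs: the retraining blocks above hardcode 32 and 64 hidden units rather than reusing the tuned values. Below is a minimal sketch (not from the original post) of rebuilding model 1 from the tuned dictionary; `BayesianOptimization` samples `hidden_units` as a float, so it needs an `int()` cast before being passed to `Dense`:

    # Sketch: rebuild model 1 from the tuned hyperparameters instead of
    # hardcoding the layer width. hidden_units comes back as a float.
    units_1 = int(best_params_model_1["hidden_units"])
    lr_1 = best_params_model_1["learning_rate"]

    model_1 = Sequential()
    model_1.add(Dense(units_1, input_dim=X.shape[1], activation="relu"))
    model_1.add(Dense(units_1, activation="relu"))
    model_1.add(Dense(1, activation="linear"))
    model_1.compile(loss="mse", optimizer=Adam(learning_rate=lr_1))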

Answer 1

Score: 1


The inner workings of the TensorFlow library are non-deterministic, so you must set a random seed to get reproducible results. In practice, you just need to add this line at the start of your code:

    tf.random.set_seed(0)
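Setting the TensorFlow seed covers weight initialization and shuffling inside Keras, but full run-to-run reproducibility usually requires seeding Python, NumPy, and TensorFlow together and forcing deterministic kernels. Here is a minimal sketch, assuming TensorFlow 2.8 or newer (`tf.keras.utils.set_random_seed` needs 2.7+, `enable_op_determinism` needs 2.8+); the `make_reproducible` helper name is hypothetical:

    import tensorflow as tf

    SEED = 0

    def make_reproducible(seed=SEED):
        # Seeds Python's random module, NumPy, and TensorFlow in one call (TF >= 2.7).
        tf.keras.utils.set_random_seed(seed)
        # Use deterministic op implementations (TF >= 2.8); training runs
        # slower but produces identical results on the same machine.
        tf.config.experimental.enable_op_determinism()

    def objective_model_1(hidden_units, learning_rate):
        make_reproducible()  # identical initial weights and shuffling on every call
        ...  # build, fit, and score the model exactly as in the question

Calling the helper at the top of each objective also makes every Bayesian-optimization evaluation start from the same initial weights, so the tuned hyperparameters and the final MSE become repeatable. Note that determinism only holds for a fixed software stack; changing TensorFlow versions or moving between CPU and GPU can still change the MSE.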
