# Data preparation for the autoencoder hyper-parameter sweep:
# load the data set and carve it into train / validation / test splits.
import torch
from sklearn.model_selection import train_test_split
from AutoEncoderModel import AutoEncoder
import pandas as pd

# The autoencoder reconstructs its input, so output width equals input width.
input_dim = 13
output_dim = input_dim

# X and Y deliberately point at the same tensor file: for an autoencoder the
# training target is the input itself.
path_datax = './DataLib/Data.pt'
path_datay = './DataLib/Data.pt'

test_size = 0.2   # fraction of the full data set held out for testing
val_size = 0.1    # fraction of the full data set held out for validation
batch_size = 256
epochs = 500

# NOTE(review): torch.load of a full tensor file from a trusted local path;
# if these files ever come from an untrusted source, prefer weights_only=True.
DataX = torch.load(path_datax)
DataY = torch.load(path_datay)
# DataX = DataX[:5000, :]
# DataY = DataY[:5000, :]

# BUG FIX: the original swapped the split outputs, so the test set ended up
# with val_size (10%) of the data and the validation set with test_size (20%).
# Conventional two-stage split: first peel off the 70% train set, then divide
# the remaining 30% pool so that test = 20% and val = 10% of the full data.
X_train, X_temp, Y_train, Y_temp = train_test_split(
    DataX, DataY, test_size=test_size + val_size, random_state=42)
X_val, X_test, Y_val, Y_test = train_test_split(
    X_temp, Y_temp, test_size=test_size / (test_size + val_size),
    random_state=42)
# Hyper-parameter grid for the sweep; extend the lists to widen the search.
hidden_dim_lib = [256]
latent_dim_lib = [100]
# num_layer_lib = [3]
lr_lib = [1E-3]

# Train one autoencoder per (hidden, latent, lr) combination, then persist
# the trained model and both loss histories for offline comparison.
for n_hidden in hidden_dim_lib:
    for n_latent in latent_dim_lib:
        for learn_rate in lr_lib:
            # Fresh model per configuration so runs never share weights.
            model = AutoEncoder(state_dim=input_dim, hidden_dim=n_hidden,
                                latent_dim=n_latent)
            train_hist, val_hist = model.autoencoder_train(
                batch_size=batch_size, epochs=epochs, lr=learn_rate,
                datax_train=X_train, datay_train=Y_train,
                datax_val=X_val, datay_val=Y_val)

            # One shared tag keeps model and loss filenames consistent.
            tag = f'nlat_{n_latent}_nhid_{n_hidden}_lr_{learn_rate}'
            torch.save(model, f'./ModelLib/models/model_{tag}.pt')
            pd.DataFrame(train_hist).to_csv(
                f'./ModelLib/losses/trainloss_{tag}.csv',
                index=False, header=False, sep=',')
            pd.DataFrame(val_hist).to_csv(
                f'./ModelLib/losses/valloss_{tag}.csv',
                index=False, header=False, sep=',')