diff --git a/models/__pycache__/neuralnet.cpython-38.pyc b/models/__pycache__/neuralnet.cpython-38.pyc
index cbe052be930dd43036f0f60dbcc2069a4a430de5..682fd872d624cdd604379504894e89096240c244 100644
Binary files a/models/__pycache__/neuralnet.cpython-38.pyc and b/models/__pycache__/neuralnet.cpython-38.pyc differ
diff --git a/models/neuralnet.py b/models/neuralnet.py
index e1f32908d6c4a966334f8b6b06ca5a05b504842f..6a07c4e91d9b1656529cdb7b8d56ba06b33e0dd3 100644
--- a/models/neuralnet.py
+++ b/models/neuralnet.py
@@ -153,7 +153,7 @@ class FNN_HyperModel(kt.HyperModel):
         self.model = model
         return model
 
-    def fit(self, hp, model, x, y, validation_data, epochs, **kwargs):
+    def fit(self, hp, model, x, y, validation_data=None, **kwargs):
         def make_ds(x, y):
             ds_x = tf.data.Dataset.from_tensor_slices(x)\
                 .batch(self.batch_size, drop_remainder=True)
@@ -169,14 +169,12 @@ class FNN_HyperModel(kt.HyperModel):
             val_ds = make_ds(*validation_data)
             history = model.fit(train_ds,
                                 validation_data=val_ds,
-                                epochs=epochs,
                                 verbose=self.verbose,
                                 **kwargs
                                 )
         else:
             val_ds = None
             history = model.fit(x, y,
-                                epochs=epochs,
                                 verbose=self.verbose,
                                 batch_size=self.batch_size,
                                 **kwargs
diff --git a/modules/__pycache__/utils.cpython-38.pyc b/modules/__pycache__/utils.cpython-38.pyc
index 0ae60611424d313f7060707ed9e97db8ad855b88..b33d8e0b67828c2e0c10591ec40526fbe33e3837 100644
Binary files a/modules/__pycache__/utils.cpython-38.pyc and b/modules/__pycache__/utils.cpython-38.pyc differ
diff --git a/modules/utils.py b/modules/utils.py
index 295d2b4252b33af02095966eb455cec0a1aa1db5..f633da551c64fde12f6361f387325d610e1617df 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -224,11 +224,17 @@ def model_training(mdl_str, x_train, y_train, marker,
         lstm_mdl = tuner.load_model(is_training=True)
         lstm_hypermodel.verbose = True
         callbacks = tuner.get_callbacks(epochs=extra_train)
+        fit_kwargs = {'epochs': extra_train,
+                      'callbacks': callbacks,
+                      }
+        if validation_data is not None:
+            fit_kwargs['validation_split'] = None
+        else:
+            fit_kwargs['validation_split'] = 0.2
+
         history = lstm_hypermodel.fit(
             None, lstm_mdl, x_train, y_train,
-            validation_data=validation_data, epochs=extra_train,
-            callbacks=callbacks
-        )
+            **fit_kwargs,)
 
         tuner.save_weights_to_path()
         tuner.load_model(is_training=False)
@@ -267,11 +273,17 @@ def model_training(mdl_str, x_train, y_train, marker,
 
         hypermodel.verbose = True
         callbacks = tuner.get_callbacks(epochs=extra_train)
+        fit_kwargs = {'epochs': extra_train,
+                      'callbacks': callbacks,
+                      }
+        if validation_data is not None:
+            fit_kwargs['validation_split'] = None
+        else:
+            fit_kwargs['validation_split'] = 0.2
+
         history = hypermodel.fit(
             None, mdl, x_train, y_train,
-            validation_data=validation_data, epochs=extra_train,
-            callbacks=callbacks,
-        )
+            **fit_kwargs,)
 
         tuner.save_weights_to_path()
         tuner.load_model(is_training=False)
diff --git a/regress_rr.py b/regress_rr.py
index 506396864b80d539537d0d7945c655f788aa6cfe..b4dce1ce8808107c32430e1bb6f352d81fc1b979 100644
--- a/regress_rr.py
+++ b/regress_rr.py
@@ -1424,7 +1424,7 @@ def arg_parser():
     )
     parser.add_argument("-s", '--subject',
                         type=int, default=1,
-                        choices=list(range(1,N_SUBJECT_MAX))+[-1],
+                        choices=list(range(1,N_SUBJECT_MAX+1))+[-1],
     )
     parser.add_argument("-f", '--feature_method',
                         type=str, default='minirocket',
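A few notes on the patch follow. On the models/neuralnet.py hunks: keras-tuner invokes `HyperModel.fit(hp, model, *args, **kwargs)` and forwards the caller's keyword arguments, so with `epochs` dropped from the explicit signature it simply rides along in `**kwargs` into `model.fit`. A minimal sketch of that pass-through, assuming TensorFlow and keras-tuner are installed; `TinyHyperModel` and the data are illustrative stand-ins, not code from this repo:

```python
# Sketch (not the repo's FNN_HyperModel): epochs now travels through
# **kwargs instead of being a dedicated positional parameter, so it is
# forwarded to model.fit exactly once.
import numpy as np
import keras_tuner as kt
import tensorflow as tf

class TinyHyperModel(kt.HyperModel):
    def build(self, hp):
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(hp.Int('units', 4, 16, step=4),
                                  activation='relu'),
            tf.keras.layers.Dense(1),
        ])
        model.compile(optimizer='adam', loss='mse')
        return model

    # Same shape as the patched signature.
    def fit(self, hp, model, x, y, validation_data=None, **kwargs):
        return model.fit(x, y, validation_data=validation_data, **kwargs)

hm = TinyHyperModel()
model = hm.build(kt.HyperParameters())
x = np.random.rand(32, 8).astype('float32')
y = np.random.rand(32, 1).astype('float32')
hm.fit(None, model, x, y, epochs=3)  # epochs arrives via **kwargs
```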
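On the modules/utils.py hunks: both call sites now assemble a `fit_kwargs` dict and splat it into the hypermodel's fit call. `validation_split` is a standard `keras.Model.fit` argument that holds out the trailing fraction of the training arrays for validation. A minimal, self-contained sketch of the dispatch pattern, with made-up data and a toy model:

```python
# Sketch of the kwargs-dispatch pattern used in model_training().
# validation_split reserves the last fraction of x/y for validation;
# data, sizes, and the model here are made up.
import numpy as np
import tensorflow as tf

x_train = np.random.rand(100, 8).astype('float32')
y_train = np.random.rand(100, 1).astype('float32')
validation_data = None          # or an (x_val, y_val) tuple

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')

fit_kwargs = {'epochs': 2}
if validation_data is not None:
    fit_kwargs['validation_split'] = None  # falsy, behaves like the 0.0 default
else:
    fit_kwargs['validation_split'] = 0.2   # hold out 20% for validation

history = model.fit(x_train, y_train, **fit_kwargs)
```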
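The regress_rr.py hunk is an off-by-one fix: `range(1, N_SUBJECT_MAX)` excludes its stop value, so the highest subject ID was rejected by argparse. The `+1` makes the upper bound inclusive while keeping `-1` (presumably the all-subjects sentinel) selectable. A quick demonstration, using a stand-in value for `N_SUBJECT_MAX`:

```python
# Demonstrates the off-by-one: range() excludes its stop value, so the
# highest subject ID was missing from the argparse choices.
# N_SUBJECT_MAX = 5 is a stand-in; the real value lives in regress_rr.py.
N_SUBJECT_MAX = 5

old_choices = list(range(1, N_SUBJECT_MAX)) + [-1]      # [1, 2, 3, 4, -1]
new_choices = list(range(1, N_SUBJECT_MAX + 1)) + [-1]  # [1, 2, 3, 4, 5, -1]

assert N_SUBJECT_MAX not in old_choices  # last subject was unselectable
assert N_SUBJECT_MAX in new_choices
```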