
Commit 8227d52

minor finetune yaml update
1 parent 0033dc9 commit 8227d52

1 file changed: 12 additions & 12 deletions

so3lr/config/finetune.yaml
@@ -8,7 +8,7 @@ data:
   dipole_vec_unit: e * Angstrom # Dipole vector unit.
   shift_mode: null # Options are null, mean, custom.
   energy_shifts: null # Energy shifts in eV to subtract.
-  split_seed: 0 # Seed using for splitting the data into training, validation and test.
+  split_seed: 42 # Seed using for splitting the data into training, validation and test.
   neighbors_lr_bool: true # Calculate long-range neighborhood indices. Required for modules like DispersionEnergy.
   neighbors_lr_cutoff: 100 # Cutoff for calculating the long-range neighborhoods in Angstrom. Is converted to the
   # data set units internally. Note that it is not required to be equal to the lr_cutoff of the model.
@@ -19,9 +19,9 @@ data:
   min_distance: 0.25 # Minimal allowed distance in Angstrom. Is converted to the data set units internally.
   max_force: 25. # Maximal allowed force component in eV/Angstrom. Is converted to the data set units internally.
 optimizer:
-  name: adam # Name of the optimizer. See https://optax.readthedocs.io/en/latest/api.html#common-optimizers for available ones.
+  name: adamw # Name of the optimizer. See https://optax.readthedocs.io/en/latest/api.html#common-optimizers for available ones.
   optimizer_args: null
-  learning_rate: 0.001 # Learning rate to use.
+  learning_rate: 0.0001 # Learning rate to use.
   learning_rate_schedule: exponential_decay # Which learning rate schedule to use. See https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules for available ones.
   learning_rate_schedule_args: # Arguments passed to the learning rate schedule. See https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules.
     decay_rate: 0.75
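
Note: the updated optimizer block maps onto the referenced optax API roughly as sketched below. This is only an illustration, not the code path so3lr actually uses; the transition_steps value is an assumed placeholder, since it is set elsewhere in finetune.yaml and does not appear in this hunk.

import optax

# Sketch only: mirrors the new config values (adamw, learning_rate 1e-4,
# exponential decay with decay_rate 0.75). transition_steps is an assumed
# placeholder; so3lr may assemble its optimizer differently.
schedule = optax.exponential_decay(
    init_value=1e-4,           # learning_rate
    transition_steps=100_000,  # assumption, not part of this diff
    decay_rate=0.75,           # learning_rate_schedule_args.decay_rate
)
optimizer = optax.adamw(learning_rate=schedule)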
@@ -43,20 +43,20 @@ training:
   batch_max_num_graphs: 6 # Maximal number of graphs per batch.
   # Since there is one padding graph involved for an effective batch size of 5 corresponds to 6 max_num_graphs.
   batch_n_proc: 1 # Number of processors used for queuing training batches, used only for TFDS dataloader
-  eval_every_num_steps: 1000 # Number of gradient steps after which the metrics on the validation set are calculated.
+  eval_every_num_steps: 10000 # Number of gradient steps after which the metrics on the validation set are calculated.
   loss_weights:
-    energy: 0.01 # Loss weight for the energy.
-    forces: 0.99 # Loss weight for the forces.
-    # Uncomment if dipole_vec and/or hirshfeld ratios are in the dataset and should be used in the loss.
+    forces: 1.00 # Loss weight for the forces.
+    # Uncomment if energy, dipole_vec and/or hirshfeld ratios are in the dataset and should be used in the loss.
+    # energy: 0.01 # Loss weight for the energy.
     # dipole_vec: 0.01
     # hirshfeld_ratios: 0.01
-  use_robust_loss: false # Use robust loss function.
-  robust_loss_alpha: 1.99 # Alpha parameter for the robust loss function.
-  model_seed: 0 # Seed used for the initialization of the model parameters.
-  training_seed: 0 # Seed used for shuffling the batches during training.
+  use_robust_loss: false # Use robust loss function. See https://arxiv.org/abs/1701.03077.
+  robust_loss_alpha: 1.00 # Alpha parameter for the robust loss function.
+  model_seed: 42 # Seed used for the initialization of the model parameters.
+  training_seed: 42 # Seed used for shuffling the batches during training.
   log_gradient_values: False # Log the norm of the gradients for each set of weights.
   use_wandb: true # Use wandb for logging.
   wandb_init_args: # Arguments to wandb.init(). See https://docs.wandb.ai/ref/python/init. The config itself is passed as config to wandb.init().
     # name: finetune_so3lr
     project: so3lr
-    group: finetune_so3lr
+    group: finetune_so3lr
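
Note: the use_robust_loss comment now points to Barron's general and adaptive robust loss (https://arxiv.org/abs/1701.03077), and robust_loss_alpha moves from 1.99 (essentially a scaled L2 loss) to 1.00 (a Charbonnier / pseudo-Huber-like loss that penalizes outlier force components less harshly). Below is a minimal sketch of that general loss for non-singular alpha, assuming a scale parameter c of 1; so3lr's actual implementation may differ.

import jax.numpy as jnp

def robust_loss(residual, alpha=1.0, c=1.0):
    # General robust loss rho(x; alpha, c) from arXiv:1701.03077, valid for
    # alpha outside the special cases {0, 2}. alpha close to 2 recovers a
    # scaled L2 loss; alpha=1 behaves like a Charbonnier / pseudo-Huber loss.
    # Sketch only: c=1 is an assumption; this is not so3lr's internal code.
    x2 = (residual / c) ** 2
    b = jnp.abs(alpha - 2.0)
    return (b / alpha) * ((x2 / b + 1.0) ** (alpha / 2.0) - 1.0)

Together with the loss_weights change in the same hunk, fine-tuning now optimizes a forces-only objective by default, with energy, dipole_vec, and hirshfeld_ratios left as commented-out optional terms.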
