so3lr/config/finetune.yaml: 12 additions & 12 deletions
@@ -8,7 +8,7 @@ data:
   dipole_vec_unit: e * Angstrom  # Dipole vector unit.
   shift_mode: null  # Options are null, mean, custom.
   energy_shifts: null  # Energy shifts in eV to subtract.
-  split_seed: 0  # Seed used for splitting the data into training, validation and test.
+  split_seed: 42  # Seed used for splitting the data into training, validation and test.
   neighbors_lr_bool: true  # Calculate long-range neighborhood indices. Required for modules like DispersionEnergy.
   neighbors_lr_cutoff: 100  # Cutoff for calculating the long-range neighborhoods in Angstrom. Is converted to the
   # data set units internally. Note that it is not required to be equal to the lr_cutoff of the model.
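Changing split_seed only changes which structures land in each partition; reproducibility comes from fixing the seed. A minimal sketch of how a seeded three-way split typically works (the function name and signature here are illustrative, not so3lr's actual API):

```python
import numpy as np

def split_data(num_data: int, num_train: int, num_valid: int, split_seed: int = 42):
    # Deterministically shuffle all example indices with the split seed,
    # then carve out train / validation / test partitions.
    rng = np.random.default_rng(split_seed)
    perm = rng.permutation(num_data)
    idx_train = perm[:num_train]
    idx_valid = perm[num_train:num_train + num_valid]
    idx_test = perm[num_train + num_valid:]
    return idx_train, idx_valid, idx_test
```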
@@ -19,9 +19,9 @@ data:
   min_distance: 0.25  # Minimal allowed distance in Angstrom. Is converted to the data set units internally.
   max_force: 25.  # Maximal allowed force component in eV/Angstrom. Is converted to the data set units internally.
 optimizer:
-  name: adam  # Name of the optimizer. See https://optax.readthedocs.io/en/latest/api.html#common-optimizers for available ones.
+  name: adamw  # Name of the optimizer. See https://optax.readthedocs.io/en/latest/api.html#common-optimizers for available ones.
   optimizer_args: null
-  learning_rate: 0.001  # Learning rate to use.
+  learning_rate: 0.0001  # Learning rate to use.
   learning_rate_schedule: exponential_decay  # Which learning rate schedule to use. See https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules for available ones.
   learning_rate_schedule_args:  # Arguments passed to the learning rate schedule. See https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules.
     decay_rate: 0.75
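For orientation, here is roughly how this optimizer block maps onto optax calls. This is a hedged sketch, not so3lr's actual wiring; in particular, transition_steps is a placeholder, since the hunk only shows decay_rate among the schedule arguments:

```python
import jax.numpy as jnp
import optax

schedule = optax.exponential_decay(
    init_value=1e-4,           # learning_rate: 0.0001
    transition_steps=100_000,  # hypothetical: not shown in this hunk
    decay_rate=0.75,           # learning_rate_schedule_args.decay_rate
)
optimizer = optax.adamw(learning_rate=schedule)  # name: adamw

params = {"w": jnp.zeros(3)}  # dummy parameter pytree
opt_state = optimizer.init(params)
grads = {"w": jnp.ones(3)}    # dummy gradients
updates, opt_state = optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
```

With a 10x smaller initial learning rate and adamw's decoupled weight decay, this is the usual conservative setup for fine-tuning a pretrained model rather than training from scratch.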
@@ -43,20 +43,20 @@ training:
   batch_max_num_graphs: 6  # Maximal number of graphs per batch.
   # Since there is one padding graph involved, an effective batch size of 5 corresponds to 6 max_num_graphs.
   batch_n_proc: 1  # Number of processors used for queuing training batches; used only for the TFDS dataloader.
-  eval_every_num_steps: 1000  # Number of gradient steps after which the metrics on the validation set are calculated.
+  eval_every_num_steps: 10000  # Number of gradient steps after which the metrics on the validation set are calculated.
   loss_weights:
-    energy: 0.01  # Loss weight for the energy.
-    forces: 0.99  # Loss weight for the forces.
-    # Uncomment if dipole_vec and/or hirshfeld ratios are in the dataset and should be used in the loss.
+    forces: 1.00  # Loss weight for the forces.
+    # Uncomment if energy, dipole_vec and/or hirshfeld ratios are in the dataset and should be used in the loss.
+    # energy: 0.01  # Loss weight for the energy.
     # dipole_vec: 0.01
     # hirshfeld_ratios: 0.01
-  use_robust_loss: false  # Use robust loss function.
-  robust_loss_alpha: 1.99  # Alpha parameter for the robust loss function.
-  model_seed: 0  # Seed used for the initialization of the model parameters.
-  training_seed: 0  # Seed used for shuffling the batches during training.
+  use_robust_loss: false  # Use robust loss function. See https://arxiv.org/abs/1701.03077.
+  robust_loss_alpha: 1.00  # Alpha parameter for the robust loss function.
+  model_seed: 42  # Seed used for the initialization of the model parameters.
+  training_seed: 42  # Seed used for shuffling the batches during training.
   log_gradient_values: False  # Log the norm of the gradients for each set of weights.
   use_wandb: true  # Use wandb for logging.
   wandb_init_args:  # Arguments to wandb.init(). See https://docs.wandb.ai/ref/python/init. The config itself is passed as config to wandb.init().
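The use_robust_loss / robust_loss_alpha pair refers to the general robust loss of Barron (https://arxiv.org/abs/1701.03077), where alpha interpolates between loss families: alpha near 2 behaves like L2, alpha = 1 gives a smooth L1 (Charbonnier) loss, and alpha toward 0 approaches a log loss. A minimal sketch of that function, assuming a unit scale parameter c (whether and how so3lr exposes c is not shown in this diff):

```python
import jax.numpy as jnp

def robust_loss(x, alpha=1.0, c=1.0):
    # General robust loss rho(x, alpha, c) from arXiv:1701.03077.
    # Valid for alpha outside {0, 2}; alpha = 1.00 (the new default here)
    # reduces to the smooth-L1 (Charbonnier) loss sqrt((x/c)**2 + 1) - 1.
    b = jnp.abs(alpha - 2.0)
    return (b / alpha) * (((x / c) ** 2 / b + 1.0) ** (alpha / 2.0) - 1.0)
```

Under this reading, the old default alpha = 1.99 was essentially an L2 loss, while alpha = 1.00 lets large residuals (e.g. force outliers) contribute roughly linearly rather than quadratically.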