Skip to content
Snippets Groups Projects
Commit 5bc9874a authored by Moritz Ibing's avatar Moritz Ibing
Browse files

Deleted old configs

parent 2443de1b
No related branches found
No related tags found
No related merge requests found
Showing
with 0 additions and 777 deletions
name: shapenet_architectures
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_multi_decoder
embedding: [composite, single_conv, substitution, double_substitution]
head: [composite, single_conv, substitution, double_substitution]
# attention
attention: basic
num_positions: 2500
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.0
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_architectures
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_decoder
embedding: [composite, substitution]
head: [substitution]
# attention
attention: basic
num_positions: 3072
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.0
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_architectures
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_only
embedding: [composite]
head: [composite]
# attention
attention: basic
num_positions: 3072
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.0
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_architectures_small
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 64
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_multi_decoder
embedding: [composite_C, single_conv, substitution, double_substitution]
head: [composite_C, single_conv, substitution, double_substitution]
# attention
attention: basic
num_positions: 2048
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 369
warmup_steps: 0.1
batch_size: 4
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.1
# hardware
gpus: 1
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_architectures_small
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 64
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_decoder
embedding: [composite_C, substitution]
head: [substitution]
# attention
attention: basic
num_positions: 2048
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 369
warmup_steps: 0.1
batch_size: 4
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.1
# hardware
gpus: 1
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_architectures_small
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 64
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_only
embedding: [composite_A]
head: [composite_A]
# attention
attention: basic
num_positions: 2048
embed_dim: 512
num_layers: 12
num_heads: 8
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 97
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.2
# hardware
gpus: 1
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_dropout
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_multi_decoder
embedding: [composite, single_conv, substitution]
head: [composite, single_conv, substitution]
# attention
attention: basic
num_positions: 2048
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.1
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_dropout
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_multi_decoder
embedding: [composite, single_conv, substitution]
head: [composite, single_conv, substitution]
# attention
attention: basic
num_positions: 2500
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.2
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: mnist_debug
dataset: mnist
# training
epochs: 5
warmup_steps: 0.2
batch_size: 64
accumulate_grad_batches: 1
learning_rate: 0.003
# architecture
embed_dim: 16
num_heads: 2
num_layers: 8
num_positions: 512
num_vocab: 3
tree_depth: 6
spatial_dim: 2
gpus: 1
precision: 16
name: mnist_s
dataset: mnist
# training
epochs: 200
warmup_steps: 0.1
batch_size: 4
accumulate_grad_batches: 16
learning_rate: 0.0001
# architecture
embed_dim: 512
num_heads: 8
num_layers: 24
num_positions: 512
num_vocab: 3
tree_depth: 6
spatial_dim: 2
gpus: 1
precision: 16
name: mnist_xs
dataset: mnist
# training
epochs: 100
warmup_steps: 0.1
batch_size: 16
accumulate_grad_batches: 4
learning_rate: 0.0003
# architecture
embed_dim: 64
num_heads: 4
num_layers: 16
num_positions: 512
num_vocab: 3
tree_depth: 6
spatial_dim: 2
gpus: 1
precision: 16
name: mnist_xxs
dataset: mnist
# training
epochs: 30
warmup_steps: 0.1
batch_size: 64
accumulate_grad_batches: 1
learning_rate: 0.003
# architecture
embed_dim: 16
num_heads: 2
num_layers: 8
num_positions: 512
num_vocab: 3
tree_depth: 6
spatial_dim: 2
gpus: 1
precision: 16
name: shapenet_num_heads
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_multi_decoder
embedding: [composite, single_conv, substitution]
head: [composite, single_conv, substitution]
# attention
attention: basic
num_positions: 2500
embed_dim: 256
num_layers: 8
num_heads: 1
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.0
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_num_heads
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_multi_decoder
embedding: [composite, single_conv, substitution]
head: [composite, single_conv, substitution]
# attention
attention: basic
num_positions: 2500
embed_dim: 256
num_layers: 8
num_heads: 2
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.0
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_autoencoder
# data
dataset: shapenet
subclass: chair
position_encoding: intertwined
transform: [linear_max_res]
resolution: 32
spatial_dim: 3
num_vocab: 3
# architecture
embedding: double_substitution
head: double_substitution
architecture: autoencoder
attention: none
num_positions: 0
embed_dim: 256
num_layers: 0
num_heads: 0
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 10
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.001
# hardware
gpus: 1
precision: 32
# logging
log_gpu: 'min_max'
log_learning_rate: true
log_gradient: false
log_weights_and_biases: false
parameter_search: false
\ No newline at end of file
name: shapenet_concat_split
# data
dataset: shapenet
subclass: chair
resolution: 32
spatial_dim: 3
num_vocab: 3
# architecture
embedding: concat_A
head: split_A
architecture: encoder_decoder
attention: basic
num_positions: 1024
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
epochs: 100
warmup_steps: 0.1
batch_size: 16
accumulate_grad_batches: 1
learning_rate: 0.0003
# hardware
gpus: 4
precision: 32
# logging
log_gpu: 'min_max'
log_learning_rate: true
log_gradient: true
log_weights_and_biases: false
parameter_search: false
\ No newline at end of file
name: shapenet_debug
# pretrained: None
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_res]
position_encoding: intertwined
resolution: 64
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_only
embedding: [composite_A]
head: [composite_A]
# attention
attention: basic
num_positions: 2048
embed_dim: 512
num_layers: 8
num_heads: 8
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 97
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.2
# hardware
gpus: 1
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_intertwined
# data
dataset: shapenet
subclass: chair
position_encoding: intertwined
resolution: 64
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_only
embedding: [composite_A]
head: [composite_A]
# attention
attention: basic
num_positions: 4096
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 100
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.0003
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_layerwise
pretrained: logs/shapenet_layerwise/version_2/checkpoints/last.ckpt
# data
dataset: shapenet
subclass: chair
transform: [scaling, warping, linear_max_128]
position_encoding: centered
resolution: 128
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_only
embedding: [composite]
head: [composite]
# attention
attention: basic
num_positions: 4096
embed_dim: 256
num_layers: 8
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 100
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
name: shapenet_long
# data
dataset: shapenet
subclass: chair
position_encoding: intertwined
transform: [scaling, warping, linear_max_res]
resolution: 256
spatial_dim: 3
num_vocab: 3
# architecture
architecture: encoder_multi_decoder
embedding: [composite, single_conv, substitution, double_substitution]
head: [composite, single_conv, substitution, double_substitution]
# attention
attention: basic
num_positions: 2500
embed_dim: 256
num_layers: 4
num_heads: 4
# training
loss_function: cross_entropy
val_loss_function: cross_entropy
epochs: 500
warmup_steps: 0.1
batch_size: 1
accumulate_grad_batches: 1
learning_rate: 0.00003
dropout: 0.0
# hardware
gpus: 8
precision: 32
# logging
log_gpu: 'min_max'
log_gradient: false
log_weights_and_biases: false
log_learning_rate: true
parameter_search: false
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment