-
Notifications
You must be signed in to change notification settings - Fork 132
Expand file tree
/
Copy pathscannet_nano.yaml
More file actions
57 lines (43 loc) · 1.18 KB
/
scannet_nano.yaml
File metadata and controls
57 lines (43 loc) · 1.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# @package _global_

# ScanNet panoptic-segmentation experiment, NANO model variant.
# To execute this experiment run:
#   python train.py experiment=panoptic/scannet_nano

defaults:
  - override /datamodule: panoptic/scannet_nano.yaml
  - override /model: panoptic/nano-2.yaml
  - override /trainer: gpu.yaml

# All parameters below will be merged with parameters from the default
# configurations set above. This allows you to overwrite only specified
# parameters.

trainer:
  max_epochs: 100

model:
  optimizer:
    lr: 0.01
    weight_decay: 1e-4

  scheduler:
    # number of warmup epochs before the LR schedule kicks in
    num_warmup: 2

  # reduced channel widths for the NANO (lightweight) architecture
  _node_mlp_out: 32
  _h_edge_mlp_out: 32
  _down_dim: [32, 32, 32, 32]
  _up_dim: [32, 32, 32]

  net:
    no_ffn: false
    down_ffn_ratio: 1

  partitioner:
    regularization: 20
    x_weight: 5e-2
    cutoff: 300

  edge_affinity_loss_lambda: 10

  # run the (costly) graph partition step only every N epochs
  partition_every_n_epoch: 4

logger:
  wandb:
    project: "spt_scannet"
    name: "NANO"

# metric based on which models will be selected
optimized_metric: "val/pq"

# Modify checkpointing callbacks to adapt to partition_every_n_epoch
# being potentially different from the validation cadence.
callbacks:
  model_checkpoint:
    every_n_epochs: ${eval:'max(${trainer.check_val_every_n_epoch}, ${model.partition_every_n_epoch})'}
  early_stopping:
    strict: false