-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathfsdp_config.yaml
30 lines (30 loc) · 1.06 KB
/
fsdp_config.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# Hugging Face Accelerate launch configuration for FSDP training on a single
# machine with a single process (num_machines: 1, num_processes: 1).
# NOTE(review): key set matches `accelerate config` output — confirm this file
# is consumed via `accelerate launch --config_file fsdp_config.yaml`.
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
# 'no' is quoted deliberately — an unquoted no would parse as boolean false.
downcast_bf16: 'no'
# 'NO' quoted for the same reason (the "Norway problem"); means no dynamo backend.
dynamo_backend: 'NO'
enable_cpu_affinity: false
fsdp_config:
  fsdp_activation_checkpointing: true # even 2 gpus not enough without activation checkpointing and 1 is almost enough with it.
  # Wrap at transformer-layer granularity; layer classes listed below.
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_backward_prefetch: BACKWARD_POST
  fsdp_cpu_ram_efficient_loading: false
  fsdp_forward_prefetch: false
  # Offload parameters to CPU to reduce GPU memory pressure (consistent with
  # the memory constraints noted on fsdp_activation_checkpointing above).
  fsdp_offload_params: true
  # NOTE(review): NO_SHARD disables parameter sharding even though
  # distributed_type is FSDP — with num_processes: 1 there is nothing to shard
  # across, but confirm this is still intended if process count is raised.
  fsdp_sharding_strategy: NO_SHARD
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sync_module_states: true
  # Comma-separated class names of the model's transformer-style layers to
  # wrap as FSDP units; quoted so the commas stay inside one string scalar.
  fsdp_transformer_layer_cls_to_wrap: 'FeedForwardNetwork,DilatedAttention,PatchEmbed'
  fsdp_use_orig_params: true # this will help debug gradient at the level of model layers instead of fsdp wrapped unit. Also required if param groups defined before model is prepared/wrapped.
machine_rank: 0
main_process_port: 24220
main_training_function: main
# fp16 mixed precision (note downcast_bf16 above is 'no', so no bf16 involved).
mixed_precision: fp16
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false