mirror of https://github.com/nomic-ai/gpt4all
fix: update train scripts and configs for other models (#1164)
* feat: falcon config
* feat: mpt config
* chore: gitignore
* refactor: step calculation
* fix: attention mask + shuffle on epoch end
* fix: return tensors
* fix: wait for everyone
* chore: config
* chore: ds config
* fix: remove ccols
* fix: logging and saving
* chore: add einops
parent: e8b19b8e82
commit: 6c4f449b7a
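Among the fixes listed above, "shuffle on epoch end" refers to a standard distributed-training pitfall: a DistributedSampler reshuffles only when it is told the current epoch. A minimal, self-contained sketch of the pattern (dummy dataset and loop, not the repo's actual training code):

```python
import torch
from torch.utils.data import DataLoader, DistributedSampler, TensorDataset

# Dummy dataset standing in for the tokenized training set.
train_dataset = TensorDataset(torch.arange(1024))

# DistributedSampler derives its shuffle from (seed, epoch); without
# set_epoch() every epoch replays the exact same sample order.
sampler = DistributedSampler(train_dataset, shuffle=True, num_replicas=1, rank=0)
loader = DataLoader(train_dataset, batch_size=8, sampler=sampler)

for epoch in range(2):
    sampler.set_epoch(epoch)  # reseed the shuffle for this epoch
    for (batch,) in loader:
        pass  # forward/backward would go here
```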
@@ -0,0 +1,49 @@
{
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "fp16": {
        "enabled": "auto",
        "min_loss_scale": 1,
        "loss_scale_window": 1000,
        "hysteresis": 2,
        "initial_scale_power": 32
    },
    "bf16": {
        "enabled": "auto"
    },
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": 1,
        "offload_param": {
            "device": "none"
        },
        "offload_optimizer": {
            "device": "none"
        },
        "allgather_partitions": true,
        "allgather_bucket_size": 5e8,
        "contiguous_gradients": true
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": [
                0.9,
                0.999
            ],
            "eps": 1e-08
        }
    },
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "warmup_min_lr": 0,
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto",
            "warmup_type": "linear",
            "total_num_steps": "auto"
        }
    }
}
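The "auto" placeholders in this DeepSpeed config are resolved by the Hugging Face integration at launch, which copies batch sizes, learning rate, warmup, and precision flags from the training arguments. A minimal sketch of that hand-off using transformers' Trainer arguments (paths and values illustrative; the repo's own scripts may wire DeepSpeed up differently):

```python
from transformers import TrainingArguments

# Every "auto" in the JSON above is filled in from these arguments:
# batch sizes, lr, warmup steps, and the fp16/bf16 switches.
args = TrainingArguments(
    output_dir="ckpts/falcon",
    per_device_train_batch_size=32,
    gradient_accumulation_steps=1,
    learning_rate=2.0e-5,
    warmup_steps=500,
    bf16=True,
    deepspeed="configs/deepspeed/ds_config.json",  # illustrative path
)
```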
@@ -0,0 +1,48 @@
{
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "fp16": {
        "enabled": "auto",
        "min_loss_scale": 1,
        "loss_scale_window": 1000,
        "hysteresis": 2,
        "initial_scale_power": 32
    },
    "bf16": {
        "enabled": "auto"
    },
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": 2,
        "offload_param": {
            "device": "none"
        },
        "offload_optimizer": {
            "device": "none"
        },
        "allgather_partitions": true,
        "allgather_bucket_size": 5e8,
        "contiguous_gradients": true
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": [
                0.9,
                0.999
            ],
            "eps": 1e-08
        }
    },
    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": 0,
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto",
            "warmup_type": "linear"
        }
    }
}
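The two configs differ in ZeRO stage (1 vs. 2) and scheduler: WarmupDecayLR above needs total_num_steps up front, while WarmupLR here does not. The "refactor: step calculation" item in the commit message concerns deriving that step count; a back-of-envelope sketch with illustrative numbers (none taken from this commit):

```python
import math

dataset_len = 400_000     # examples in the training split (illustrative)
micro_batch_per_gpu = 32  # train_micro_batch_size_per_gpu
grad_accum = 1            # gradient_accumulation_steps
world_size = 8            # number of GPUs (illustrative)
num_epochs = 2

# One optimizer step consumes micro_batch * grad_accum examples per GPU.
effective_batch = micro_batch_per_gpu * grad_accum * world_size
steps_per_epoch = math.ceil(dataset_len / effective_batch)
total_num_steps = steps_per_epoch * num_epochs  # feeds WarmupDecayLR
print(total_num_steps)
```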
@@ -0,0 +1,34 @@
# model/tokenizer
model_name: "tiiuae/falcon-7b"
tokenizer_name: "tiiuae/falcon-7b"
gradient_checkpointing: true
save_name: "nomic-ai/gpt4all-falcon"

# dataset
streaming: false
num_proc: 64
dataset_path: "nomic-ai/gpt4all-j-prompt-generations"
revision: "v1.3-groovy"
max_length: 1024
batch_size: 32

# train dynamics
lr: 2.0e-5
min_lr: 0
weight_decay: 0.0
eval_every: 500
eval_steps: 105
save_every: 1000
log_grads_every: 500
output_dir: "ckpts/falcon"
checkpoint: "/home/paperspace/gpt4all/ckpts/mpt/step_1000"
lora: false
warmup_steps: 500
num_epochs: 2

# logging
wandb: true
wandb_entity: "gpt4all"
wandb_project_name: "gpt4all"
seed: 42
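A YAML config like this is typically read with PyYAML and handed to the model loader. A minimal sketch (the file path is hypothetical); Falcon shipped custom modeling code, so transformers versions without native Falcon support need trust_remote_code=True:

```python
import yaml
from transformers import AutoModelForCausalLM, AutoTokenizer

with open("configs/train/finetune_falcon.yaml") as f:  # hypothetical path
    config = yaml.safe_load(f)

tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_name"])
model = AutoModelForCausalLM.from_pretrained(
    config["model_name"],
    trust_remote_code=True,  # Falcon's modeling code lives in the model repo
)
if config["gradient_checkpointing"]:
    model.gradient_checkpointing_enable()  # trade recompute for memory
```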
@@ -0,0 +1,34 @@
# model/tokenizer
model_name: "mosaicml/mpt-7b"
tokenizer_name: "mosaicml/mpt-7b"
gradient_checkpointing: false
save_name: "nomic-ai/mpt-finetuned-round2"

# dataset
streaming: false
num_proc: 64
dataset_path: "nomic-ai/gpt4all-j-prompt-generations"
revision: "v1.3-groovy"
max_length: 1024
batch_size: 8

# train dynamics
lr: 2.0e-5
min_lr: 0
weight_decay: 0.0
eval_every: 500
eval_steps: 105
save_every: 1000
log_grads_every: 500
output_dir: "ckpts/mpt"
checkpoint: null
lora: false
warmup_steps: 500
num_epochs: 2

# logging
wandb: false
wandb_entity: "gpt4all"
wandb_project_name: "gpt4all"
seed: 42
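Each config carries a `lora` flag, false in every file here, which implies a branch like the following in the train script. A hedged sketch using peft; the rank, alpha, and target_modules are placeholders, not values from this commit:

```python
from peft import LoraConfig, get_peft_model

def maybe_wrap_lora(model, config):
    """Wrap the model in LoRA adapters only when the config asks for it."""
    if not config["lora"]:
        return model
    lora_config = LoraConfig(
        r=8,                                 # placeholder rank
        lora_alpha=32,                       # placeholder scaling
        lora_dropout=0.05,
        target_modules=["query_key_value"],  # placeholder module names
        task_type="CAUSAL_LM",
    )
    return get_peft_model(model, lora_config)
```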
@@ -0,0 +1,34 @@
# model/tokenizer
model_name: "openlm-research/open_llama_7b"
tokenizer_name: "openlm-research/open_llama_7b"
gradient_checkpointing: true
save_name: "nomic-ai/gpt4all-openllama"

# dataset
streaming: false
num_proc: 64
dataset_path: "nomic-ai/gpt4all-updated"
revision: null
max_length: 1024
batch_size: 32

# train dynamics
lr: 2.0e-5
min_lr: 0
weight_decay: 0.0
eval_every: 500
log_every: 10
save_every: 1000
log_grads_every: 500
output_dir: "ckpts/falcon"
checkpoint: null
lora: false
warmup_steps: 500
num_epochs: 3

# logging
wandb: true
wandb_entity: "gpt4all"
wandb_project_name: "gpt4all"
seed: 42
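The `*_every` keys set the training-loop cadence: evaluate every 500 steps, checkpoint every 1000, log gradient norms every 500. A schematic of how such counters are typically checked (not the repo's actual loop):

```python
def should_run(step: int, every: int) -> bool:
    """True on steps that hit the configured interval."""
    return every > 0 and step % every == 0

eval_every, save_every, log_grads_every = 500, 1000, 500  # from the YAML

for step in range(1, 3001):
    # ... forward / backward / optimizer step would happen here ...
    if should_run(step, eval_every):
        pass  # run evaluation
    if should_run(step, save_every):
        pass  # checkpoint to output_dir
    if should_run(step, log_grads_every):
        pass  # log gradient norms to wandb
```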