#!/usr/bin/env bash
#
# Single-node torchrun launcher for ViT-Base/16 training on ImageNet,
# intended to run under SLURM (srun/sbatch).
#
# Usage: <script> GPUS_PER_NODE
# Env:   SLURM_PROCID, SLURM_JOB_NODELIST  — provided by SLURM
#        PORT                              — optional master port (default 29500)
set -euo pipefail

NODE_ID=${SLURM_PROCID:-0}   # this node's rank within the job (0 if not under srun)
TOTAL_NODES=1                # hard-coded single-node launch

# First host in the SLURM node list acts as the rendezvous master.
MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)

# Fail fast with a usage message instead of handing torchrun an empty value.
GPUS=${1:?usage: $0 GPUS_PER_NODE}
PORT=${PORT:-29500}

echo 'The TOTAL_NODES is: ' "$TOTAL_NODES"
echo 'The node_rank is: ' "$NODE_ID"
echo 'The MASTER_ADDR is: ' "$MASTER_ADDR"
echo 'The GPUS is: ' "$GPUS"
echo 'The port is: ' "$PORT"

# Resolve the script's own directory so train.py is found regardless of CWD;
# '&&' (not ';') so a failed cd cannot yield a bogus pwd.
CURDIR=$(cd "$(dirname "$0")" && pwd)
cd "$CURDIR"
export PYTHONPATH="${PYTHONPATH:-}:$CURDIR"
echo 'The current dir is: ' "$CURDIR"

# NOTE(review): upstream timm defines --log-wandb as a store_true flag, in
# which case the trailing "True" below would be rejected as a stray positional
# argument — confirm against this repo's train.py before relying on it.
WANDB_MODE=offline torchrun \
  --nnodes="$TOTAL_NODES" \
  --node_rank="$NODE_ID" \
  --master_addr="$MASTER_ADDR" \
  --nproc_per_node="$GPUS" \
  --master_port="$PORT" \
  "$CURDIR/train.py" \
  --data-dir /data/datasets_ml/imagenet \
  --model vit_base_patch16_224 \
  --batch-size 256 \
  --val-split val \
  --decay-epochs 0 \
  --decay-milestones 0 \
  --decay-rate 0 \
  --epochs 1000 \
  --lr 3e-4 \
  --opt adamw \
  --num-classes 1000 \
  --img-size 224 \
  --log-wandb True \
  --warmup-epochs 0