#!/usr/bin/env bash
#
# Launch one node of a multi-node distributed timm training run.
#
# Usage:   ./launch.sh GPUS [NODE_ID]
#   $1 GPUS     - number of processes (GPUs) to spawn on this node (required)
#   $2 NODE_ID  - rank of this node, 0..TOTAL_NODES-1 (default 0)
#
# Env overrides:
#   PORT         - master port          (default 29500)
#   MASTER_ADDR  - master host/IP       (default: first host in SLURM_JOB_NODELIST,
#                                        else 192.168.1.154)
#   TOTAL_NODES  - number of nodes      (default 3)
set -euo pipefail

GPUS=${1:?usage: $0 GPUS [NODE_ID] -- number of GPUs on this node is required}
NODE_ID=${2:-0}
TOTAL_NODES=${TOTAL_NODES:-3}
PORT=${PORT:-29500}

# Resolve the rendezvous address from SLURM when running under it; otherwise
# fall back to the hardcoded cluster IP. Guarding on the env var avoids a
# spurious scontrol error when launched outside SLURM.
if [[ -n "${SLURM_JOB_NODELIST:-}" ]]; then
  MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
fi
MASTER_ADDR=${MASTER_ADDR:-"192.168.1.154"}

printf 'The TOTAL_NODES is:  %s\n' "$TOTAL_NODES"
printf 'The node_rank is:    %s\n' "$NODE_ID"
printf 'The MASTER_ADDR is:  %s\n' "$MASTER_ADDR"
printf 'The GPUS is:         %s\n' "$GPUS"
printf 'The port is:         %s\n' "$PORT"

# Run from the script's own directory so train.py and relative paths resolve.
CURDIR=$(cd "$(dirname "$0")" && pwd)
cd "$CURDIR"
export PYTHONPATH="${PYTHONPATH:-}:$CURDIR"
printf 'The current dir is:  %s\n' "$CURDIR"

# NOTE: torch.distributed.launch is deprecated in recent PyTorch releases in
# favor of torchrun; kept here because train.py expects the --local-rank argv
# contract this launcher provides. Migrate together with train.py.
python -m torch.distributed.launch \
  --nnodes="$TOTAL_NODES" \
  --node_rank="$NODE_ID" \
  --master_addr="$MASTER_ADDR" \
  --nproc_per_node="$GPUS" \
  --master_port="$PORT" \
  "$CURDIR/train.py" \
  --data-dir /home/remote/u7394442/EmotionROI \
  --model vit_base_patch14_dinov2.lvd142m \
  --batch-size 3 \
  --val-split val \
  --decay-epochs 0 \
  --decay-milestones 0 \
  --decay-rate 0 \
  --epochs 300 \
  --lr 3e-4 \
  --opt adamw \
  --num-classes 6 \
  --img-size 518 \
  --log-wandb True \
  --warmup-epochs 0
  # NOTE(review): timm's --log-wandb is typically a store_true flag; the
  # trailing "True" may be parsed as a positional argument — verify against
  # train.py's argument parser.