#! /bin/bash
set -euo pipefail

# Print usage/help text for this launcher to stdout.
# BUG FIX: "donot bind" typo corrected to "do not bind".
print_usage() {
    cat << EOF
${0} [options] [--] COMMAND [ARG...]

Control binding policy for each task. Assumes one rank will be launched for each GPU.

Options:
  --cpu=MODE
    * exclusive -- bind each rank to an exclusive set of cores near its GPU
    * exclusive,nosmt -- bind each rank to an exclusive set of cores near its GPU, without hyperthreading
    * node -- bind each rank to all cores in the NUMA node nearest its GPU [default]
    * *.sh -- bind each rank using the bash associative array bind_cpu_cores or bind_cpu_nodes from a file
    * off -- don't bind
  --mem=MODE
    * node -- bind each rank to the nearest NUMA node [default]
    * *.sh -- bind each rank using the bash associative array bind_mem from a file
    * off -- don't bind
  --ib=MODE
    * single -- bind each rank to a single IB device near its GPU
    * off -- do not bind [default]
  --cluster=CLUSTER
    Select which cluster is being used. May be required if system params cannot be detected.
EOF
}

################################################################################
# Argument parsing
################################################################################

# Binding policy defaults; see print_usage for the meaning of each mode.
cpu_mode='node'
mem_mode='node'
ib_mode='off'
cluster=''
while [ $# -gt 0 ]; do
    case "$1" in
        -h|--help) print_usage ; exit 0 ;;
        # BUG FIX: was "${1/*=/}", which strips everything up to the LAST '='
        # and so mangles values containing '=' (e.g. file paths).  ${1#*=}
        # removes only the shortest prefix up to the first '='.
        --cpu=*) cpu_mode="${1#*=}"; shift ;;
        --cpu) cpu_mode="$2"; shift 2 ;;
        --mem=*) mem_mode="${1#*=}"; shift ;;
        --mem) mem_mode="$2"; shift 2 ;;
        --ib=*) ib_mode="${1#*=}"; shift ;;
        --ib) ib_mode="$2"; shift 2 ;;
        --cluster=*) cluster="${1#*=}"; shift ;;
        --cluster) cluster="$2"; shift 2 ;;
        --) shift; break ;;
        *) break ;;
    esac
done
if [ $# -lt 1 ]; then
    # BUG FIX: was '2>&1', which sends the message to stdout; '>&2' is intended.
    echo 'ERROR: no command given' >&2
    print_usage
    exit 1
fi

################################################################################
# Get system params
################################################################################

# Determine this process's rank on the local node.  The nested ':=' expansions
# fall through LOCAL_RANK -> SLURM_LOCALID -> OMPI_COMM_WORLD_LOCAL_RANK and,
# as a side effect, assign the winning value back to LOCAL_RANK (and possibly
# SLURM_LOCALID).  NOTE(review): these assignments are only inherited by the
# exec'd command for variables that were already exported -- confirm intended.
# LOCAL_RANK is set with an enroot hook for Pytorch containers
# SLURM_LOCALID is set by Slurm
# OMPI_COMM_WORLD_LOCAL_RANK is set by mpirun
readonly local_rank="${LOCAL_RANK:=${SLURM_LOCALID:=${OMPI_COMM_WORLD_LOCAL_RANK:-}}}"
if [ -z "${local_rank}" ]; then
    echo 'ERROR: cannot read LOCAL_RANK from env' >&2
    exit 1
fi

# Total GPUs on this node; querying device 0 returns the overall count.
num_gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader,nounits)
# One rank per GPU is assumed, so the local rank must index a valid GPU.
if [ "${local_rank}" -ge "${num_gpus}" ]; then
    echo "ERROR: local rank is ${local_rank}, but there are only ${num_gpus} gpus available" >&2
    exit 1
fi

# Extract the value of a "Key:   value" line from lscpu-style output on stdin.
# Arguments: $1 - exact key name (e.g. 'Socket(s)')
# Outputs:   the value with spaces stripped, on stdout
# Returns:   0 if the key was found, non-zero otherwise
# BUG FIX: the key was interpolated directly into the awk program text, so a
# key containing '"' or '\' would corrupt the script; pass it via 'awk -v'.
get_lscpu_value() {
    awk -F: -v key="$1" '($1 == key){gsub(/ /, "", $2); print $2; found=1} END{exit found!=1}'
}
# Snapshot lscpu once and parse the topology fields out of the captured text
# (avoids re-running lscpu per field).
lscpu_out=$(lscpu)
num_sockets=$(get_lscpu_value 'Socket(s)' <<< "${lscpu_out}")
num_nodes=$(get_lscpu_value 'NUMA node(s)' <<< "${lscpu_out}")
cores_per_socket=$(get_lscpu_value 'Core(s) per socket' <<< "${lscpu_out}")

echo "num_sockets = ${num_sockets} num_nodes=${num_nodes} cores_per_socket=${cores_per_socket}"

readonly cores_per_node=$(( (num_sockets * cores_per_socket) / num_nodes ))
# GPUs are assumed to be spread evenly across NUMA nodes; with a single GPU,
# everything maps to one node regardless of node count.
# BUG FIX: ${num_gpus} was unquoted (SC2086); an empty value would turn this
# into a '[ -gt 1 ]' syntax error rather than a clean false.
if [ "${num_gpus}" -gt 1 ]; then
    readonly gpus_per_node=$(( num_gpus / num_nodes ))
else
    readonly gpus_per_node=1
fi
readonly cores_per_gpu=$(( cores_per_node / gpus_per_node ))
# NUMA node nearest this rank's GPU.
readonly local_node=$(( local_rank / gpus_per_node ))


# Build the list of IB devices, ordered so that index i is nearest GPU i.
declare -a ibdevs=()
case "${cluster}" in
    circe)
        # Need to specialize for circe because IB detection is hard
        ibdevs=(mlx5_1 mlx5_2 mlx5_3 mlx5_4 mlx5_7 mlx5_8 mlx5_9 mlx5_10)
        ;;
    selene)
        # Need to specialize for selene because IB detection is hard
        ibdevs=(mlx5_0 mlx5_1 mlx5_2 mlx5_3 mlx5_6 mlx5_7 mlx5_8 mlx5_9)
        ;;
    '')
        # Autodetect via ibstat.
        # BUG FIX: guard against empty output -- mapfile on an empty
        # here-string would otherwise produce one empty device name, making
        # num_ibdevs=1 with a bogus entry.
        if ibstat_out="$(ibstat -l 2>/dev/null | sort -V)" && [ -n "${ibstat_out}" ]; then
            mapfile -t ibdevs <<< "${ibstat_out}"
        fi
        ;;
    *)
        echo "ERROR: Unknown cluster '${cluster}'" >&2
        exit 1
        ;;
esac
readonly num_ibdevs="${#ibdevs[@]}"

################################################################################
# Setup for exec
################################################################################

declare -a numactl_args=()

# Translate the requested CPU binding mode into numactl arguments.
case "${cpu_mode}" in
    exclusive)
        # Bind to this rank's cores_per_gpu physical cores plus their SMT
        # siblings (the second range is offset by the total physical core
        # count).
        numactl_args+=( "$(printf -- "--physcpubind=%u-%u,%u-%u" \
            $(( local_rank * cores_per_gpu )) \
            $(( (local_rank + 1) * cores_per_gpu - 1 )) \
            $(( local_rank * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) )) \
            $(( (local_rank + 1) * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) - 1 )) \
        )" )
        ;;
    exclusive,nosmt)
        # Same as 'exclusive' but without the hyperthread sibling range.
        numactl_args+=( "$(printf -- "--physcpubind=%u-%u" \
            $(( local_rank * cores_per_gpu )) \
            $(( (local_rank + 1) * cores_per_gpu - 1 )) \
        )" )
        ;;
    node)
        numactl_args+=( "--cpunodebind=${local_node}" )
        ;;
    *.sh)
        # Per-rank bindings supplied by a user script, which must define the
        # array bind_cpu_cores or bind_cpu_nodes indexed by local rank.
        source "${cpu_mode}"
        if [ -n "${bind_cpu_cores:-}" ]; then
            numactl_args+=( "--physcpubind=${bind_cpu_cores[${local_rank}]}" )
        elif [ -n "${bind_cpu_nodes:-}" ]; then
            numactl_args+=( "--cpunodebind=${bind_cpu_nodes[${local_rank}]}" )
        else
            echo "ERROR: invalid CPU affinity file ${cpu_mode}." >&2
            exit 1
        fi
        ;;
    off|'')
        ;;
    *)
        # BUG FIX: was '2>&1', which sends the message to stdout; '>&2' is intended.
        echo "ERROR: invalid cpu mode '${cpu_mode}'" >&2
        print_usage
        exit 1
        ;;
esac

# Translate the requested memory binding mode into numactl arguments.
case "${mem_mode}" in
    node)
        numactl_args+=( "--membind=${local_node}" )
        ;;
    *.sh)
        # Per-rank bindings supplied by a user script, which must define the
        # array bind_mem indexed by local rank.
        source "${mem_mode}"
        if [ -z "${bind_mem:-}" ]; then
            echo "ERROR: invalid memory affinity file ${mem_mode}." >&2
            exit 1
        fi
        numactl_args+=( "--membind=${bind_mem[${local_rank}]}" )
        ;;
    off|'')
        ;;
    *)
        # BUG FIX: was '2>&1', which sends the message to stdout; '>&2' is intended.
        echo "ERROR: invalid mem mode '${mem_mode}'" >&2
        print_usage
        exit 1
        ;;
esac

# Optionally restrict OpenMPI's openib BTL to the IB device nearest this GPU.
case "${ib_mode}" in
    single)
        if [ "${num_ibdevs}" -eq 0 ]; then
            # BUG FIX: was '2>&1', which sends the warning to stdout; '>&2' is intended.
            echo "WARNING: used '$0 --ib=single', but there are 0 IB devices available; skipping IB binding." >&2
        else
            # Map the local rank onto the device list proportionally, so e.g.
            # 8 ranks over 4 devices share each device between 2 ranks.
            readonly ibdev="${ibdevs[$(( local_rank * num_ibdevs / num_gpus ))]}"
            # '${VAR-default}' keeps any value already present in the environment.
            export OMPI_MCA_btl_openib_if_include="${OMPI_MCA_btl_openib_if_include-$ibdev}"
        fi
        ;;
    off|'')
        ;;
    *)
        # BUG FIX: was '2>&1', which sends the message to stdout; '>&2' is intended.
        echo "ERROR: invalid ib mode '${ib_mode}'" >&2
        print_usage
        exit 1
        ;;
esac

################################################################################
# Exec
################################################################################

# Hand control to the user command, prefixed with numactl whenever any binding
# was requested.  'set -x' echoes the final numactl invocation for debugging.
if [ "${#numactl_args[@]}" -ne 0 ] ; then
    set -x
    exec numactl "${numactl_args[@]}" -- "$@"
fi
exec "$@"