This page is intended to give some instructions on how to run the Wien2k package on our cluster.
== Access ==
The official build is installed in the user account "wien2k". Access to this account is restricted to users who are members of the unix group "wien2k". The latest version is always linked to the directory /home/wien2k/wien2k. Your setup in your .bashrc could look like:
<pre>
export WIENROOT="/home/wien2k/wien2k"
export PATH="$WIENROOT:$PATH"
export SCRATCH="/tmp"
</pre>
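To verify the setup, you can check your group membership and the environment from a login shell. This is only a quick sketch assuming the .bashrc entries above have been sourced; lapw1 is used here merely as an example of a Wien2k executable that should then be found on your PATH:

<pre>
# Check group membership and environment (names as in the setup above)
id -nG | grep -qw wien2k && echo "in group wien2k" || echo "not in group wien2k"
echo "WIENROOT=$WIENROOT"
which lapw1
</pre>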
== Parallel runs with Slurm ==
This script is currently untested. Please test and report.
<pre>
#! /bin/bash
#
# Running a wien2k job with Slurm using k-point parallelization.
#
# Creates an appropriate .machines file for wien2k.
#
# Allocated resources are encoded like this:
#   SLURM_JOB_CPUS_PER_NODE="4,2(2x)"
#   SLURM_NODELIST="bree,carla,mike"
#
# = run 4 tasks on bree and 2 each on carla and mike.
#
# Slurm parameters (read 'man sbatch')
#
#SBATCH --partition=dfg
#SBATCH --mem-per-cpu=4000
#SBATCH --ntasks=8
#SBATCH --output=job.out
# (--output includes STDERR)
#

# Set the internal parallelization in MKL to use only
# one thread per process.
export OMP_NUM_THREADS=1

# Use , as list separator
IFS=','
# Convert string to array
hcpus=($SLURM_JOB_CPUS_PER_NODE)
unset IFS

declare -a conv

# Expand the compressed Slurm array, e.g. "2(2x)" -> "2 2"
for cpu in ${hcpus[@]}; do
    if [[ $cpu =~ (.*)\((.*)x\) ]]; then
        # found compressed value
        value=${BASH_REMATCH[1]}
        factor=${BASH_REMATCH[2]}
        for j in $(seq 1 $factor); do
            conv=( ${conv[*]} $value )
        done
    else
        conv=( ${conv[*]} $cpu )
    fi
done

# Build the .machines file: one line "1:<node>" per allocated CPU
rm -f .machines

nhost=0

echo ${conv[@]}
IFS=','
for node in $SLURM_NODELIST
do
    declare -i cpuspernode=${conv[$nhost]}
    for ((i=0; i<${cpuspernode}; i++))
    do
        echo 1:$node >> .machines
    done
    let nhost+=1
done
# Restore default word splitting
unset IFS

echo 'granularity:1' >> .machines
echo 'extrafine:1' >> .machines
# .machines file complete

# Run your calculation
x lapw1 -p
</pre>
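For the example allocation shown in the header comments (SLURM_JOB_CPUS_PER_NODE="4,2(2x)" and SLURM_NODELIST="bree,carla,mike"), the loops above would produce a .machines file with the following content; this is only meant to illustrate the format:

<pre>
1:bree
1:bree
1:bree
1:bree
1:carla
1:carla
1:mike
1:mike
granularity:1
extrafine:1
</pre>

Save the script in your case directory under a name of your choice (the file name below is just a placeholder) and submit it with sbatch:

<pre>
sbatch wien2k.job
</pre>

Instead of the single x lapw1 -p step at the end of the script, a complete parallel SCF cycle is usually started with run_lapw -p once the .machines file has been written.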