Diffstat (limited to 'sysutils/slurm-hpc/files')
 sysutils/slurm-hpc/files/patch-configure                                                                 |  41 +
 sysutils/slurm-hpc/files/patch-src-plugins-acct_gather_filesystem-lustre-acct_gather_filesystem_lustre.c |  11 +
 sysutils/slurm-hpc/files/patch-src-plugins-select-cons_res-dist_tasks.c                                  |  68 ++
 sysutils/slurm-hpc/files/patch-src-plugins-task-cgroup-task_cgroup_cpuset.c                              |  33 +
 sysutils/slurm-hpc/files/slurm.conf.in                                                                   | 169 ++++
 sysutils/slurm-hpc/files/slurmctld.in                                                                    |  43 ++
 sysutils/slurm-hpc/files/slurmd.in                                                                       |  43 ++
7 files changed, 408 insertions, 0 deletions
diff --git a/sysutils/slurm-hpc/files/patch-configure b/sysutils/slurm-hpc/files/patch-configure
new file mode 100644
index 000000000000..fb981236cba1
--- /dev/null
+++ b/sysutils/slurm-hpc/files/patch-configure
@@ -0,0 +1,41 @@
+--- configure.orig 2013-09-10 16:44:33.000000000 -0500
++++ configure 2013-11-14 10:23:02.000000000 -0600
+@@ -21594,12 +21594,9 @@
+ main ()
+ {
+ 
+- int main()
+- {
+ MYSQL mysql;
+ (void) mysql_init(&mysql);
+ (void) mysql_close(&mysql);
+- }
+ 
+ ;
+ return 0;
+@@ -21636,12 +21633,9 @@
+ main ()
+ {
+ 
+- int main()
+- {
+ MYSQL mysql;
+ (void) mysql_init(&mysql);
+ (void) mysql_close(&mysql);
+- }
+ 
+ ;
+ return 0;
+@@ -21803,12 +21797,9 @@
+ main ()
+ {
+ 
+- int main()
+- {
+ PGconn *conn;
+ conn = PQconnectdb("dbname = postgres");
+ (void) PQfinish(conn);
+- }
+ 
+ ;
+ return 0;
diff --git a/sysutils/slurm-hpc/files/patch-src-plugins-acct_gather_filesystem-lustre-acct_gather_filesystem_lustre.c b/sysutils/slurm-hpc/files/patch-src-plugins-acct_gather_filesystem-lustre-acct_gather_filesystem_lustre.c
new file mode 100644
index 000000000000..9c14d1d219f3
--- /dev/null
+++ b/sysutils/slurm-hpc/files/patch-src-plugins-acct_gather_filesystem-lustre-acct_gather_filesystem_lustre.c
@@ -0,0 +1,11 @@
+--- src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c.orig 2013-09-10 16:44:33.000000000 -0500
++++ src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c 2013-11-14 10:23:02.000000000 -0600
+@@ -49,6 +49,8 @@
+ #include <getopt.h>
+ #include <netinet/in.h>
+ 
++#include <limits.h>
++
+ 
+ #include "src/common/slurm_xlator.h"
+ #include "src/common/slurm_acct_gather_filesystem.h"
diff --git a/sysutils/slurm-hpc/files/patch-src-plugins-select-cons_res-dist_tasks.c b/sysutils/slurm-hpc/files/patch-src-plugins-select-cons_res-dist_tasks.c
new file mode 100644
index 000000000000..c2166f12123e
--- /dev/null
+++ b/sysutils/slurm-hpc/files/patch-src-plugins-select-cons_res-dist_tasks.c
@@ -0,0 +1,68 @@
+--- src/plugins/select/cons_res/dist_tasks.c.orig 2013-09-10 16:44:33.000000000 -0500
++++ src/plugins/select/cons_res/dist_tasks.c 2013-11-14 10:23:02.000000000 -0600
+@@ -271,6 +271,30 @@
+ 	return SLURM_SUCCESS;
+ }
+ 
++// These were nested below, which is not legal in standard C
++
++	/* qsort compare function for ascending int list */
++	int _cmp_int_ascend (const void *a, const void *b)
++	{
++		return (*(int*)a - *(int*)b);
++	}
++
++	/* qsort compare function for descending int list */
++	int _cmp_int_descend (const void *a, const void *b)
++	{
++		return (*(int*)b - *(int*)a);
++	}
++
++	int* sockets_cpu_cnt;
++
++	/* qsort compare function for board combination socket
++	 * list */
++	int _cmp_sock (const void *a, const void *b)
++	{
++		return (sockets_cpu_cnt[*(int*)b] -
++			sockets_cpu_cnt[*(int*)a]);
++	}
++
+ /* sync up core bitmap with new CPU count using a best-fit approach
+  * on the available resources on each node
+  *
+@@ -298,7 +322,6 @@
+ 	int elig_idx, comb_brd_idx, sock_list_idx, comb_min, board_num;
+ 	int* boards_cpu_cnt;
+ 	int* sort_brds_cpu_cnt;
+-	int* sockets_cpu_cnt;
+ 	int* board_combs;
+ 	int* socket_list;
+ 	int* elig_brd_combs;
+@@ -316,26 +339,6 @@
+ 	uint64_t ncomb_brd;
+ 	bool sufficient,best_fit_sufficient;
+ 
+-	/* qsort compare function for ascending int list */
+-	int _cmp_int_ascend (const void *a, const void *b)
+-	{
+-		return (*(int*)a - *(int*)b);
+-	}
+-
+-	/* qsort compare function for descending int list */
+-	int _cmp_int_descend (const void *a, const void *b)
+-	{
+-		return (*(int*)b - *(int*)a);
+-	}
+-
+-	/* qsort compare function for board combination socket
+-	 * list */
+-	int _cmp_sock (const void *a, const void *b)
+-	{
+-		return (sockets_cpu_cnt[*(int*)b] -
+-			sockets_cpu_cnt[*(int*)a]);
+-	}
+-
+ 	if (!job_res)
+ 		return;
+ 
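Note: the dist_tasks.c patch above exists because upstream defined its qsort comparators as functions nested inside the enclosing function, a GNU C extension that FreeBSD's Clang rejects. The patch hoists them to file scope and promotes sockets_cpu_cnt to a file-scope variable, since qsort() passes no user context to the comparator. A minimal standalone sketch of the same pattern (names and data are illustrative, not taken from the patch):

#include <stdio.h>
#include <stdlib.h>

/* qsort() gives the comparator no user-context argument, so shared
 * state must live at file scope -- the same reason the patch moves
 * sockets_cpu_cnt out of the function body. */
static int *cpu_cnt;

/* Sort socket indices by descending CPU count (cf. _cmp_sock). */
static int cmp_sock_desc(const void *a, const void *b)
{
	return cpu_cnt[*(const int *)b] - cpu_cnt[*(const int *)a];
}

int main(void)
{
	int counts[] = { 2, 8, 4 };	/* free CPUs per socket */
	int socks[]  = { 0, 1, 2 };	/* socket indices to sort */

	cpu_cnt = counts;
	qsort(socks, 3, sizeof(socks[0]), cmp_sock_desc);

	/* Prints: 1 2 0 (sockets ordered by free CPU count) */
	for (int i = 0; i < 3; i++)
		printf("%d ", socks[i]);
	putchar('\n');
	return 0;
}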
diff --git a/sysutils/slurm-hpc/files/patch-src-plugins-task-cgroup-task_cgroup_cpuset.c b/sysutils/slurm-hpc/files/patch-src-plugins-task-cgroup-task_cgroup_cpuset.c
new file mode 100644
index 000000000000..ffeafb1a879d
--- /dev/null
+++ b/sysutils/slurm-hpc/files/patch-src-plugins-task-cgroup-task_cgroup_cpuset.c
@@ -0,0 +1,33 @@
+--- src/plugins/task/cgroup/task_cgroup_cpuset.c.orig 2013-11-14 10:56:33.000000000 -0600
++++ src/plugins/task/cgroup/task_cgroup_cpuset.c 2013-11-14 11:10:51.000000000 -0600
+@@ -59,7 +59,12 @@
+ 
+ #ifdef HAVE_HWLOC
+ #include <hwloc.h>
++#if !defined(__FreeBSD__)
+ #include <hwloc/glibc-sched.h>
++#else
++// For cpuset
++#include <pthread_np.h>
++#endif
+ 
+ # if HWLOC_API_VERSION <= 0x00010000
+ /* After this version the cpuset structure and all it's functions
+@@ -714,7 +719,7 @@
+ 	hwloc_obj_type_t req_hwtype;
+ 
+ 	size_t tssize;
+-	cpu_set_t ts;
++	cpuset_t ts;
+ 
+ 	bind_type = job->cpu_bind_type ;
+ 	if (conf->task_plugin_param & CPU_BIND_VERBOSE ||
+@@ -900,7 +905,7 @@
+ 
+ 	hwloc_bitmap_asprintf(&str, cpuset);
+ 
+-	tssize = sizeof(cpu_set_t);
++	tssize = sizeof(cpuset_t);
+ 	if (hwloc_cpuset_to_glibc_sched_affinity(topology,cpuset,
+ 						 &ts,tssize) == 0) {
+ 		fstatus = SLURM_SUCCESS;
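Note: the task_cgroup_cpuset.c patch swaps glibc's cpu_set_t for FreeBSD's cpuset_t and pulls in <pthread_np.h> in place of hwloc's glibc-sched helper. The sketch below shows that portability seam in isolation; it assumes a FreeBSD 9.x-or-newer libc on the FreeBSD branch, the cpu_mask_t alias is hypothetical, and none of this code is from the port:

#if !defined(__FreeBSD__)
#define _GNU_SOURCE		/* glibc: expose cpu_set_t and the *_np calls */
#endif

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#if defined(__FreeBSD__)
#include <sys/param.h>
#include <sys/cpuset.h>		/* cpuset_t plus CPU_ZERO()/CPU_SET() */
#include <pthread_np.h>		/* pthread_setaffinity_np() lives here */
typedef cpuset_t cpu_mask_t;	/* hypothetical alias for this sketch */
#else
#include <sched.h>
typedef cpu_set_t cpu_mask_t;
#endif

int main(void)
{
	cpu_mask_t mask;
	int rc;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);	/* bind the calling thread to CPU 0 */

	/* Same argument order on glibc and FreeBSD once pthread_np.h
	 * is included; pthread functions return the error number. */
	rc = pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
	if (rc != 0)
		fprintf(stderr, "bind failed: %s\n", strerror(rc));
	else
		puts("bound to CPU 0");
	return 0;
}

Compile with -pthread; on FreeBSD the hwloc glibc-sched shim simply is not available, which is why the patch branches at the preprocessor rather than at runtime.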
diff --git a/sysutils/slurm-hpc/files/slurm.conf.in b/sysutils/slurm-hpc/files/slurm.conf.in
new file mode 100644
index 000000000000..226090ea3dd4
--- /dev/null
+++ b/sysutils/slurm-hpc/files/slurm.conf.in
@@ -0,0 +1,169 @@
+# slurm.conf file generated by configurator.html.
+# Put this file on all nodes of your cluster.
+# See the slurm.conf man page for more information.
+#
+ControlMachine=%%CONTROL_MACHINE%%
+#ControlAddr=
+#BackupController=%%BACKUP_CONTROL_MACHINE%%
+#BackupAddr=
+#
+AuthType=auth/munge
+CacheGroups=0
+#CheckpointType=checkpoint/none
+CryptoType=crypto/munge
+#DisableRootJobs=NO
+#EnforcePartLimits=NO
+#Epilog=
+#EpilogSlurmctld=
+#FirstJobId=1
+#MaxJobId=999999
+#GresTypes=
+#GroupUpdateForce=0
+#GroupUpdateTime=600
+#JobCheckpointDir=/var/slurm/checkpoint
+#JobCredentialPrivateKey=
+#JobCredentialPublicCertificate=
+#JobFileAppend=0
+#JobRequeue=1
+#JobSubmitPlugins=1
+#KillOnBadExit=0
+#LaunchType=launch/slurm
+#Licenses=foo*4,bar
+MailProg=/usr/bin/mail
+#MaxJobCount=5000
+#MaxStepCount=40000
+#MaxTasksPerNode=128
+MpiDefault=none
+#MpiParams=ports=#-#
+#PluginDir=
+#PlugStackConfig=
+#PrivateData=jobs
+ProctrackType=proctrack/pgid
+#Prolog=
+#PrologSlurmctld=
+#PropagatePrioProcess=0
+#PropagateResourceLimits=
+# Prevent head node limits from being applied to jobs!
+PropagateResourceLimitsExcept=ALL
+#RebootProgram=
+ReturnToService=1
+#SallocDefaultCommand=
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmctldPort=6817
+SlurmdPidFile=/var/run/slurmd.pid
+SlurmdPort=6818
+SlurmdSpoolDir=/var/spool/slurmd
+SlurmUser=slurm
+#SlurmdUser=root
+#SrunEpilog=
+#SrunProlog=
+StateSaveLocation=/home/slurm/slurmctld
+SwitchType=switch/none
+#TaskEpilog=
+TaskPlugin=task/none
+#TaskPluginParam=
+#TaskProlog=
+#TopologyPlugin=topology/tree
+#TmpFs=/tmp
+#TrackWCKey=no
+#TreeWidth=
+#UnkillableStepProgram=
+#UsePAM=0
+#
+#
+# TIMERS
+#BatchStartTimeout=10
+#CompleteWait=0
+#EpilogMsgTime=2000
+#GetEnvTimeout=2
+#HealthCheckInterval=0
+#HealthCheckProgram=
+InactiveLimit=0
+KillWait=30
+#MessageTimeout=10
+#ResvOverRun=0
+MinJobAge=300
+#OverTimeLimit=0
+SlurmctldTimeout=120
+SlurmdTimeout=300
+#UnkillableStepTimeout=60
+#VSizeFactor=0
+Waittime=0
+#
+#
+# SCHEDULING
+#DefMemPerCPU=0
+FastSchedule=1
+#MaxMemPerCPU=0
+#SchedulerRootFilter=1
+#SchedulerTimeSlice=30
+SchedulerType=sched/backfill
+SchedulerPort=7321
+SelectType=select/cons_res
+#SelectTypeParameters=
+#
+#
+# JOB PRIORITY
+#PriorityType=priority/basic
+#PriorityDecayHalfLife=
+#PriorityCalcPeriod=
+#PriorityFavorSmall=
+#PriorityMaxAge=
+#PriorityUsageResetPeriod=
+#PriorityWeightAge=
+#PriorityWeightFairshare=
+#PriorityWeightJobSize=
+#PriorityWeightPartition=
+#PriorityWeightQOS=
+#
+#
+# LOGGING AND ACCOUNTING
+#AccountingStorageEnforce=0
+#AccountingStorageHost=
+#AccountingStorageLoc=
+#AccountingStoragePass=
+#AccountingStoragePort=
+AccountingStorageType=accounting_storage/none
+#AccountingStorageUser=
+AccountingStoreJobComment=YES
+ClusterName=cluster
+#DebugFlags=
+#JobCompHost=
+#JobCompLoc=
+#JobCompPass=
+#JobCompPort=
+JobCompType=jobcomp/none
+#JobCompUser=
+JobAcctGatherFrequency=30
+JobAcctGatherType=jobacct_gather/none
+SlurmctldDebug=5
+SlurmctldLogFile=/var/log/slurmctld
+SlurmdDebug=5
+SlurmdLogFile=/var/log/slurmd
+#SlurmSchedLogFile=
+#SlurmSchedLogLevel=
+#
+#
+# POWER SAVE SUPPORT FOR IDLE NODES (optional)
+#SuspendProgram=
+#ResumeProgram=
+#SuspendTimeout=
+#ResumeTimeout=
+#ResumeRate=
+#SuspendExcNodes=
+#SuspendExcParts=
+#SuspendRate=
+#SuspendTime=
+#
+#
+# COMPUTE NODES
+
+#############################################################################
+# Note: Using CPUs=2 or Sockets=2 causes slurmctld to seg fault on FreeBSD.
+# Use Sockets=1, CoresPerSocket=total-cores-in-node, and
+# ThreadsPerCore=N, even if your motherboard has more than 1 socket.
+# This issue is related to get_cpuinfo() and is being investigated.
+#############################################################################
+
+NodeName=compute-[001-002] Sockets=1 CoresPerSocket=1 ThreadsPerCore=1 State=UNKNOWN
+PartitionName=default-partition Nodes=compute-[001-002] Default=YES MaxTime=INFINITE State=UP
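Note: per the warning block at the end of slurm.conf.in, multi-socket boards must be declared to slurmctld as a single socket on FreeBSD. For a hypothetical dual-socket machine with four cores per socket, the workaround would read (host name and counts invented for illustration):

# Physically 2 sockets x 4 cores, declared as one 8-core socket to
# sidestep the get_cpuinfo() segfault described in the note above.
NodeName=compute-003 Sockets=1 CoresPerSocket=8 ThreadsPerCore=1 State=UNKNOWN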
diff --git a/sysutils/slurm-hpc/files/slurmctld.in b/sysutils/slurm-hpc/files/slurmctld.in
new file mode 100644
index 000000000000..5199e48b23bc
--- /dev/null
+++ b/sysutils/slurm-hpc/files/slurmctld.in
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+# PROVIDE: slurmctld
+# REQUIRE: DAEMON munge
+# BEFORE: LOGIN
+# KEYWORD: shutdown
+#
+# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
+# to enable this service:
+#
+# slurmctld_enable (bool): Set to NO by default.
+#                          Set it to YES to enable slurmctld.
+#
+
+. /etc/rc.subr
+
+name="slurmctld"
+rcvar=slurmctld_enable
+
+pidfile=/var/run/$name.pid
+
+load_rc_config $name
+
+: ${slurmctld_enable="NO"}
+
+start_cmd=slurmctld_start
+stop_cmd=slurmctld_stop
+
+slurmctld_start() {
+	checkyesno slurmctld_enable && echo "Starting $name." && \
+	%%PREFIX%%/sbin/$name $slurmctld_flags
+}
+
+slurmctld_stop() {
+	if [ -e $pidfile ]; then
+		checkyesno slurmctld_enable && echo "Stopping $name." && \
+		kill `cat $pidfile`
+	else
+		killall $name
+	fi
+}
+
+run_rc_command "$1"
diff --git a/sysutils/slurm-hpc/files/slurmd.in b/sysutils/slurm-hpc/files/slurmd.in
new file mode 100644
index 000000000000..277d48a2d317
--- /dev/null
+++ b/sysutils/slurm-hpc/files/slurmd.in
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+# PROVIDE: slurmd
+# REQUIRE: DAEMON munge
+# BEFORE: LOGIN
+# KEYWORD: shutdown
+#
+# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
+# to enable this service:
+#
+# slurmd_enable (bool): Set to NO by default.
+#                       Set it to YES to enable slurmd.
+#
+
+. /etc/rc.subr
+
+name="slurmd"
+rcvar=slurmd_enable
+
+pidfile=/var/run/$name.pid
+
+load_rc_config $name
+
+: ${slurmd_enable="NO"}
+
+start_cmd=slurmd_start
+stop_cmd=slurmd_stop
+
+slurmd_start() {
+	checkyesno slurmd_enable && echo "Starting $name." && \
+	%%PREFIX%%/sbin/$name $slurmd_flags
+}
+
+slurmd_stop() {
+	if [ -e $pidfile ]; then
+		checkyesno slurmd_enable && echo "Stopping $name." && \
+		kill `cat $pidfile`
+	else
+		killall $name
+	fi
+}
+
+run_rc_command "$1"
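Note: once the port installs the two rc.d scripts above, the daemons are enabled the standard FreeBSD way, as the script headers describe. For example, using stock rc tooling (sysrc ships with FreeBSD 9.2 and later; editing /etc/rc.conf by hand works on any release):

# on the head node
sysrc slurmctld_enable=YES
service slurmctld start

# on each compute node
sysrc slurmd_enable=YES
service slurmd start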