Merge branch 'Development' into feature/unify_kernels
dimtsap authored May 2, 2023
2 parents 1080c15 + bb2c0ce commit 3dc0086
Showing 39 changed files with 2,597 additions and 45 deletions.
1 change: 1 addition & 0 deletions GitVersion.yml
@@ -7,3 +7,4 @@ branches: {}
ignore:
  sha: []
merge-message-formats: {}

27 changes: 27 additions & 0 deletions docs/code/RunModel/ClusterScript_Example/add_numbers.py
@@ -0,0 +1,27 @@
import sys
import os
import json
import numpy as np


def addNumbers():
    inputPath = sys.argv[1]
    outputPath = sys.argv[2]

    # Open JSON file
    with open(inputPath, "r") as jsonFile:
        data = json.load(jsonFile)

    # Read generated numbers
    number1 = data["number1"]
    number2 = data["number2"]

    randomAddition = number1 + number2

    # Write addition to file
    with open(outputPath, 'w') as outputFile:
        outputFile.write('{}\n'.format(randomAddition))


if __name__ == '__main__':
    addNumbers()
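
Note: the script's contract is that argv[1] is a JSON input file with keys number1 and number2, and argv[2] is the output path. A minimal sanity-check harness, not part of the commit and with made-up paths and values, could look like this:

import json
import os
import subprocess
import tempfile

# Write a small input file that matches the template's keys
workDir = tempfile.mkdtemp()
inputPath = os.path.join(workDir, "inputRealization_0.json")
with open(inputPath, "w") as f:
    json.dump({"number1": 260.0, "number2": 70.0}, f)

# Invoke the script the same way run_script.sh does: input path, then output path
outputPath = os.path.join(workDir, "qoiFile_0.txt")
subprocess.run(["python3", "add_numbers.py", inputPath, outputPath], check=True)

with open(outputPath) as f:
    print(f.read())  # expected: 330.0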
20 changes: 20 additions & 0 deletions docs/code/RunModel/ClusterScript_Example/addition_run.py
@@ -0,0 +1,20 @@
import os
import shutil
import fire


def runAddition(index):
    index = int(index)

    inputRealizationPath = os.path.join(os.getcwd(), 'run_' + str(index), 'InputFiles',
                                        'inputRealization_' + str(index) + ".json")
    outputPath = os.path.join(os.getcwd(), 'OutputFiles')

    # This is where pre-processing commands would be executed prior to running the cluster script.
    command1 = ("echo \"This is where pre-processing would be happening\"")

    os.system(command1)


if __name__ == '__main__':
    fire.Fire(runAddition)
4 changes: 4 additions & 0 deletions docs/code/RunModel/ClusterScript_Example/inputRealization.json
@@ -0,0 +1,4 @@
{
    "number1" : <var_1>,
    "number2" : <var_2>
}
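
Note: ThirdPartyModel fills the <var_1> and <var_2> placeholders in this template with the values of each sample before a run. As a conceptual sketch only (this is not UQpy's internal code, and the sample values are invented):

# Minimal stand-in for the templating step; the names mirror var_names in
# cluster_script_example.py, and the values are made up for illustration.
template = '{\n    "number1" : <var_1>,\n    "number2" : <var_2>\n}'
sample = {"var_1": 263.2, "var_2": 71.9}

realization = template
for name, value in sample.items():
    realization = realization.replace("<" + name + ">", str(value))

print(realization)  # one concrete inputRealization_i.json for a single run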
26 changes: 26 additions & 0 deletions docs/code/RunModel/ClusterScript_Example/process_addition_output.py
@@ -0,0 +1,26 @@
import numpy as np
from pathlib import Path


class OutputProcessor:

    def __init__(self, index):
        filePath = Path("./OutputFiles/qoiFile_" + str(index) + ".txt")
        self.numberOfColumns = 0
        self.numberOfLines = 0
        addedNumbers = []

        # Check if file exists
        if filePath.is_file():
            # Now, open and read data
            with open(filePath) as f:
                for line in f:
                    currentLine = line.split()

                    if len(currentLine) != 0:
                        addedNumbers.append(currentLine[:])

        if not addedNumbers:
            self.qoi = np.empty(shape=(0, 0))
        else:
            self.qoi = np.vstack(addedNumbers)
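
Note: as the driver script below suggests, RunModel constructs one of these objects per run (output_object_name='OutputProcessor') and collects them into qoi_list, whose qoi attributes the example reads later. A standalone usage sketch, assuming the current directory contains the OutputFiles/qoiFile_0.txt layout that run_script.sh creates:

# Post-process run 0 by hand; the path layout is an assumption of this sketch.
op = OutputProcessor(0)
print(op.qoi)        # rows stacked from qoiFile_0.txt
print(op.qoi.shape)  # (0, 0) if the file is missing or empty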
57 changes: 57 additions & 0 deletions docs/code/RunModel/ClusterScript_Example/run_script.sh
@@ -0,0 +1,57 @@
#!/bin/bash

# NOTE: The job configuration etc. would be in the batch script that launches
# your python script that uses UQpy. This script would then utilize those
# resources by using the appropriate commands here to launch parallel jobs. For
# example, TACC uses slurm and ibrun, so you would launch your python script in
# the slurm batch script and then use ibrun here to tile parallel runs.
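#
# As an illustration only, a minimal Slurm batch script wrapping the UQpy
# driver could look like the sketch below. The job name, node counts, and
# time limit are assumptions for this example, not part of the commit.
#
#     #!/bin/bash
#     #SBATCH --job-name=uqpy_addition
#     #SBATCH --nodes=1
#     #SBATCH --ntasks-per-node=32
#     #SBATCH --time=00:10:00
#
#     python3 cluster_script_example.py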

# This function is where you can define all the parts of a single task.
taskFunction(){
    coresPerProc=$1
    runNumber=$2
    host=$3

    # An offset like this is sometimes needed as an argument when launching jobs; it is not used here.
    let offset=$coresPerProc*$runNumber

    cd run_$runNumber
    # Here, we launch a parallel job. The example uses multiple cores to add numbers,
    # which is somewhat pointless. It simply illustrates how tiled parallel jobs are
    # launched and where MPI-capable applications would be initiated.
    mkdir -p ./OutputFiles
    mpirun -n $coresPerProc --host $host:$coresPerProc python3 ../add_numbers.py ./InputFiles/inputRealization_$runNumber.json ./OutputFiles/qoiFile_$runNumber.txt
    cd ..
}

# Get list of hosts
echo $SLURM_NODELIST > hostfile

# Split by comma
IFS="," read -ra HOSTS < hostfile

# This is the loop that launches taskFunction in parallel
coresPerProcess=$1
numberOfJobs=$2
# This number will vary depending on the number of cores per node. In this case, it is 32.
N=32

echo
echo "Starting parallel job launch"

declare -i index=0

for i in $(seq 0 $((numberOfJobs-1)))
do
    # Launch taskFunction and put it into the background
    echo "Launching job number ${i} on ${HOSTS[$index]}"
    taskFunction $coresPerProcess $i ${HOSTS[$index]} &

    # Move to the next host once all cores on the current node have been allocated
    if ! (( i % N )) && [ $i -ne 0 ]
    then
        index=${index}+1
    fi
done

wait # This wait call is necessary so that the loop above completes before the script returns
echo "Analyses done!"
78 changes: 78 additions & 0 deletions docs/code/RunModel/cluster_script_example.py
@@ -0,0 +1,78 @@
"""
Cluster Script Example for Third-party
======================================
"""

# %% md
#
# In this case, we run a simple addition of random numbers, but the process is
# exactly the same for more complicated workflows. The pre- and post-processing
# are done through `model_script` and `output_script` respectively, while the
# computationally intensive portion of the workflow is launched in
# `cluster_script`. The example below provides a minimal framework from which
# more complex cases can be constructed.
#
# Import the necessary libraries

# %%
from UQpy.sampling import LatinHypercubeSampling
from UQpy.run_model.RunModel import RunModel
from UQpy.run_model.model_execution.ThirdPartyModel import ThirdPartyModel
from UQpy.distributions import Uniform
import numpy as np
import time
import csv

# %% md
#
# Define the distribution objects.

# %%

var_names = ["var_1", "var_2"]
distributions = [Uniform(250.0, 40.0), Uniform(66.0, 24.0)]

# %% md
#
# Draw the samples using Latin Hypercube Sampling.

# %%

x_lhs = LatinHypercubeSampling(distributions, nsamples=64)
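
# For reference, x_lhs.samples is then a (64, 2) array: one row per sample
# and one column per entry in var_names.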

# %% md
#
# Run the model.

# %%

model = ThirdPartyModel(var_names=var_names, input_template='inputRealization.json', model_script='addition_run.py',
                        output_script='process_addition_output.py', output_object_name='OutputProcessor',
                        model_dir='AdditionRuns')

t = time.time()
modelRunner = RunModel(model=model, samples=x_lhs.samples, ntasks=1,
                       cores_per_task=2, nodes=1, resume=False,
                       run_type='CLUSTER', cluster_script='./run_script.sh')

t_total = time.time() - t
print("\nTotal time for all experiments:")
print(t_total, "\n")

# %% md
#
# Print the model results. This is just for illustration.

# %%
for index, experiment in enumerate(modelRunner.qoi_list, 0):
    if len(experiment.qoi) != 0:
        for item in experiment.qoi:
            print("These are the random numbers for sample {}:".format(index))
            for sample in x_lhs.samples[index]:
                print("{}\t".format(sample))

            print("This is their sum:")
            for result in item:
                print("{}\t".format(result))
    print()
2 changes: 2 additions & 0 deletions docs/code/sampling/tempering/README.rst
@@ -0,0 +1,2 @@
Tempering MCMC Examples
^^^^^^^^^^^^^^^^^^^^^^^
5 changes: 5 additions & 0 deletions docs/code/sampling/tempering/local_reliability_funcs.py
@@ -0,0 +1,5 @@
import numpy as np


def correlated_gaussian(samples, b_eff, d):
    # Linear performance function g(x) = b_eff * sqrt(d) - sum(x), evaluated row by row
    return [b_eff * np.sqrt(d) - np.sum(samples[i, :]) for i in range(samples.shape[0])]
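
Note: a quick usage sketch (the inputs below are invented for illustration): each row of samples is one realization, and the function returns one performance value per row, with g(x) <= 0 conventionally indicating failure.

import numpy as np

# Three hypothetical samples in d = 2 dimensions, target reliability index b_eff = 3
samples = np.array([[0.5, -0.2], [1.0, 1.0], [4.0, 1.5]])
print(correlated_gaussian(samples, b_eff=3.0, d=2))
# -> [3.94..., 2.24..., -1.25...]; the last sample lies in the failure domain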