Commit 19c3309

some cleanup for twiki

jpata committed Nov 2, 2020
1 parent 820662a commit 19c3309
Showing 3 changed files with 14 additions and 14 deletions.
10 changes: 7 additions & 3 deletions RecoParticleFlow/PFProducer/test/mlpf_training/run.sh
@@ -1,11 +1,15 @@
 #!/bin/bash
 set -e
 set -x
 mkdir -p TTbar_14TeV_TuneCUETP8M1_cfi/root
+mkdir -p TTbar_14TeV_TuneCUETP8M1_cfi/raw
+mkdir -p TTbar_14TeV_TuneCUETP8M1_cfi/tfr/cand

 ./generate.sh TTbar_14TeV_TuneCUETP8M1_cfi 1 10
 cp pfntuple_1.root TTbar_14TeV_TuneCUETP8M1_cfi/root/

-python3 preprocessing.py --input TTbar_14TeV_TuneCUETP8M1_cfi/root/ --save-normalized-table --outpath TTbar_14TeV_TuneCUETP8M1_cfi/raw/ --events-per-file 5
-
-echo "now initialize TF 2.3 and call run_tf.sh"
+echo "now initialize TF 2.3"
+source training_env/bin/activate
+python3 preprocessing.py --input TTbar_14TeV_TuneCUETP8M1_cfi/root/pfntuple_1.root --save-normalized-table --outpath TTbar_14TeV_TuneCUETP8M1_cfi/raw/ --events-per-file 5
+python3 tf_data.py --datapath TTbar_14TeV_TuneCUETP8M1_cfi --target cand --num-files-per-tfr 1
+python3 tf_model.py --datapath TTbar_14TeV_TuneCUETP8M1_cfi --target cand
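The script now assumes a pre-built virtualenv named `training_env` that provides TensorFlow 2.3, matching the `fn_output_signature` changes in tf_model.py below. A minimal sanity check one could run after activation (illustrative, not part of the patch):

```python
# Hypothetical check, run after `source training_env/bin/activate`, that the
# environment really provides the TensorFlow 2.3 the scripts below expect.
import tensorflow as tf

assert tf.__version__.startswith("2.3"), (
    "expected TensorFlow 2.3, got %s" % tf.__version__)
```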
4 changes: 0 additions & 4 deletions RecoParticleFlow/PFProducer/test/mlpf_training/run_tf.sh

This file was deleted.

14 changes: 7 additions & 7 deletions RecoParticleFlow/PFProducer/test/mlpf_training/tf_model.py
@@ -81,7 +81,7 @@ def map_function(x):
         return mult_slice

     elems = (tf.range(0, tf.cast(num_batches, tf.int64), delta=1, dtype=tf.int64), b)
-    ret = tf.map_fn(map_function, elems, dtype=tf.float32, back_prop=True)
+    ret = tf.map_fn(map_function, elems, fn_output_signature=tf.float32, back_prop=True)
     return ret

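For context on the change above: TensorFlow 2.3 deprecates the `dtype=` argument of `tf.map_fn` in favor of `fn_output_signature=`, which describes the per-element output structure. A toy sketch of the same migration (all names here are illustrative, not from the patch):

```python
import tensorflow as tf

# Stand-ins for the (batch index, batch tensor) element structure used above.
idx = tf.range(0, 4, dtype=tf.int64)
vals = tf.random.normal((4, 3))

# Previously: tf.map_fn(fn, elems, dtype=tf.float32)
# Since TF 2.3: declare the per-element output type via fn_output_signature.
ret = tf.map_fn(
    lambda e: tf.cast(e[0], tf.float32) * e[1],  # e = (index, row)
    (idx, vals),
    fn_output_signature=tf.float32)  # ret: shape (4, 3), dtype float32
```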

@@ -324,7 +324,7 @@ def func(args):
         return inds, vals

     elems = (tf.range(0, tf.cast(n_batches, tf.int64), delta=1, dtype=tf.int64), point_embedding)
-    ret = tf.map_fn(func, elems, dtype=(tf.int64, tf.float32), parallel_iterations=1)
+    ret = tf.map_fn(func, elems, fn_output_signature=(tf.int64, tf.float32), parallel_iterations=1)
     shp = tf.shape(ret[0])
     # now create a new SparseTensor that is a concatenation of the previous ones
     dms = tf.SparseTensor(
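The same migration applies when the mapped function returns a tuple: the output structure is declared as a tuple of types. A self-contained sketch of the multi-output case (toy function, not the patch's `func`):

```python
import tensorflow as tf

# When fn returns an (indices, values) pair, the pair of output types is
# passed via fn_output_signature instead of the deprecated dtype=.
def fn(i):
    return tf.stack([i, i + 1]), tf.cast(i, tf.float32)

inds, vals = tf.map_fn(
    fn, tf.range(3, dtype=tf.int64),
    fn_output_signature=(tf.int64, tf.float32),
    parallel_iterations=1)
# inds: shape (3, 2), dtype int64; vals: shape (3,), dtype float32
```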
@@ -353,9 +353,9 @@ def subpoints_to_sparse_matrix(self, n_points, subindices, subpoints):
         top_k = tf.nn.top_k(dm, k=self.num_neighbors)
         top_k_vals = tf.reshape(top_k.values, (nbins*nelems, self.num_neighbors))

-        indices_gathered = tf.map_fn(
+        indices_gathered = tf.vectorized_map(
             lambda i: tf.gather_nd(subindices, top_k.indices[:, :, i:i+1], batch_dims=1),
-            tf.range(self.num_neighbors, dtype=tf.int64), dtype=tf.int32)
+            tf.range(self.num_neighbors, dtype=tf.int64))

         indices_gathered = tf.transpose(indices_gathered, [1,2,0])

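Unlike `tf.map_fn`, `tf.vectorized_map` infers the output dtype from the traced function, so the old `dtype=tf.int32` argument is dropped; the trade-off is that the mapped function must be auto-vectorizable. A toy sketch:

```python
import tensorflow as tf

# tf.vectorized_map takes no dtype argument: the output type is inferred
# from the function, and iterations are vectorized rather than run as a loop.
x = tf.random.normal((8, 5))
row_norms = tf.vectorized_map(lambda row: tf.reduce_sum(row * row), x)
# row_norms: shape (8,), dtype float32, inferred automatically
```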
@@ -839,8 +839,8 @@ def parse_args():
     parser.add_argument("--hidden-dim-id", type=int, default=256, help="hidden dimension")
     parser.add_argument("--hidden-dim-reg", type=int, default=256, help="hidden dimension")
     parser.add_argument("--batch-size", type=int, default=1, help="number of events in training batch")
-    parser.add_argument("--num-convs-id", type=int, default=1, help="number of convolution layers")
-    parser.add_argument("--num-convs-reg", type=int, default=1, help="number of convolution layers")
+    parser.add_argument("--num-convs-id", type=int, default=3, help="number of convolution layers")
+    parser.add_argument("--num-convs-reg", type=int, default=3, help="number of convolution layers")
     parser.add_argument("--num-hidden-id-enc", type=int, default=2, help="number of encoder layers for multiclass")
     parser.add_argument("--num-hidden-id-dec", type=int, default=2, help="number of decoder layers for multiclass")
     parser.add_argument("--num-hidden-reg-enc", type=int, default=2, help="number of encoder layers for regression")
@@ -850,7 +850,7 @@ def parse_args():
     parser.add_argument("--bin-size", type=int, default=100, help="number of points per LSH bin")
     parser.add_argument("--dropout", type=float, default=0.1, help="Dropout rate")
     parser.add_argument("--dist-mult", type=float, default=1.0, help="Exponential multiplier")
-    parser.add_argument("--target", type=str, choices=["cand", "gen"], help="Regress to PFCandidates or GenParticles", default="gen")
+    parser.add_argument("--target", type=str, choices=["cand", "gen"], help="Regress to PFCandidates or GenParticles", default="cand")
     parser.add_argument("--weights", type=str, choices=["uniform", "inverse", "classbalanced"], help="Sample weighting scheme to use", default="inverse")
     parser.add_argument("--name", type=str, default=None, help="where to store the output")
     parser.add_argument("--convlayer", type=str, default="ghconv", choices=["ghconv"], help="Type of graph convolutional layer")
