#!/usr/bin/env cwl-runner
class: Workflow
cwlVersion: v1.2
# Feature requirements: subworkflows, inline JS for valueFrom expressions,
# step-level valueFrom, and multi-source step inputs are all used below.
requirements:
  - class: SubworkflowFeatureRequirement
  - class: InlineJavascriptRequirement
  - class: StepInputExpressionRequirement
  - class: MultipleInputFeatureRequirement
inputs:
  skip_formatting:
    type: boolean?
    doc: If true, will skip first two stages and start at processing.
  # pseudochannel sorting vars, if present then it will be assumed that sorting must be performed.
  channel_yml:
    type: File?
    doc: PyYML-formatted list containing a dictionary outlining how the truechannels in imaging relate to the pseudochannels in the decoding codebook. The index of each dict within the list is the trueround % (count of pseudorounds). The keys of the dict are the channels within the image and the values are the pseudochannels in the converted notebook.
  cycle_yml:
    type: File?
    doc: PyYML-formatted dictionary outlining how the truerounds in imaging relate to the pseudorounds in the decoding codebook. The keys are truerounds and the values are the corresponding pseudorounds.
  selected_fovs:
    type: int[]?
    doc: If provided, steps after conversion will only be run on FOVs with these indices.
  # format of input vars
  # can be read into converter or sorter, followed by string literal input will be used for conversion
  tiffs:
    type: Directory?
    doc: The directory containing all .tiff files
  codebook_csv:
    type: File?
    doc: Flattened csv input, refer to record entry.
  codebook_json:
    type: File?
    doc: Flattened json input, refer to record entry.
  locs_json:
    type: File?
    doc: Flattened json input, refer to record entry.
  data_org_file:
    type: File?
    doc: The data org file used to describe .dax formatted images.
  mask_roi_files:
    type: Directory?
    doc: Flattened directory input, refer to record entry "binary_mask"
  mask_roi_formats:
    type: string?
    doc: Flattened record input, refer to record entry "binary_mask"
  mask_labeled_files:
    type: Directory?
    doc: Flattened file input, refer to record entry "binary_mask"
  mask_labeled_formats:
    type: string?
    doc: Flattened record input, refer to record entry "binary_mask"
  # Codebook union: either a csv record or a json record (or omitted).
  codebook:
    type:
      - 'null'
      - type: record
        name: csv
        fields:
          csv:
            type: File
            doc: The codebook for this experiment in .csv format, where the rows are barcodes and the columns are imaging rounds. Column IDs are expected to be sequential, and round identifiers are expected to be integers (not roman numerals).
      - type: record
        name: json
        fields:
          json:
            type: File
            doc: The codebook for this experiment, already formatted in the spaceTx defined .json format.
  parameter_json:
    type: File?
    doc: json file containing parameters for the whole experiment. If variable is present here, it will supercede any passed in the yml.
  round_count:
    type: int?
    doc: The number of imaging rounds in the experiment
  zplane_count:
    type: int?
    doc: The number of z-planes in each image
  channel_count:
    type: int?
    doc: The number of total channels per imaging round
  fov_count:
    type: int?
    doc: The number of FOVs that are included in this experiment
  round_offset:
    type: int?
    doc: The index of the first round (for file names).
  fov_offset:
    type: int?
    doc: The index of the first FOV (for file names).
  channel_offset:
    type: int?
    doc: The index of the first channel (for file names).
  file_format:
    type: string?
    doc: String with layout for .tiff files
  file_vars:
    type: string[]?
    doc: Variables to get substituted into the file_format string.
  cache_read_order:
    type: string[]?
    doc: Order of non x,y dimensions within each image.
aux_tilesets:
- 'null'
- type: record
name: aux_tilesets
fields:
aux_names:
type: string[]?
doc: Names of the Auxillary tiles.
aux_file_formats:
type: string[]?
doc: String layout for .tiff files of aux views.
aux_file_vars:
type: string[]?
doc: Variables to be substituted into aux_file_formats. One entry per aux_name, with semicolon-delimited vars.
aux_cache_read_order:
type: string[]?
doc: Order of non x,y dimensions within each image. One entry per aux_name, with semicolon-delimited vars.
aux_single_round:
type: string[]?
doc: If True, the specified aux view will only have one round.
aux_channel_count:
type: float[]?
doc: Count of channels in each aux image.
aux_channel_slope:
type: float[]?
doc: Used to convert 0-indexed channel IDs to the channel index within the image. Calculated as (image index) = int(index*slope) + intercept
aux_channel_intercept:
type: int[]?
doc: Used to convert 0-indexed channel IDs to the channel index within the image. Calculated as (image index) = int(index*slope) + intercept
fov_positioning:
- 'null'
- type: record
name: locs
fields:
locs:
type: File?
doc: Input locations as a json file, using the same records as below.
- type: record
name: fov_positioning
fields:
- name: x_locs
type: string?
doc: list of x-axis start locations per fov index
- name: x_shape
type: int?
doc: shape of each fov item in the x-axis
- name: x_voxel
type: float?
doc: size of voxels in the x-axis
- name: y_locs
type: string?
doc: list of y-axis start locations per fov index
- name: y_shape
type: int?
doc: shape of each fov item in the y-axis
- name: y_voxel
type: float?
doc: size of voxels in the y-axis
- name: z_locs
type: string?
doc: list of z-axis start locations per fov index
- name: z_shape
type: int?
doc: shape of each fov item in the z-axis
- name: z_voxel
type: float?
doc: size of voxels in the z-axis
add_blanks:
type: boolean?
doc: If true, will add blanks with a hamming distance 1 from existing codes.
# image processing
input_dir:
type: Directory?
doc: Root directory containing space_tx formatted experiment. Only used if skip_formatting is true.
skip_processing:
type: boolean?
doc: If true, image processing step will be skipped.
default: false
clip_min:
type: float?
doc: Pixels below this percentile are set to 0.
clip_max:
type: float?
doc: Pixels above this percentile are set to 1.
level_method:
type: string?
doc: Levelling method for clip and scale application. Defaults to SCALE_BY_CHUNK.
register_aux_view:
type: string?
doc: The name of the auxillary view to be used for image registration.
register_to_primary:
type: boolean?
doc: If true, registration will be performed between the first round of register_aux_view and the primary view.
background_view:
type: string?
doc: The name of the auxillary view to be used for background subtraction. Background will be estimated if not provided.
register_background:
type: boolean?
doc: If true, `background_view` will be aligned to `aux_name`.
anchor_view:
type: string?
doc: The name of the auxillary view to be processed in parallel with primary view, such as for anchor round in ISS processing. Will not be included if not provided.
high_sigma:
type: int?
doc: Sigma value for high pass gaussian filter. Will not be run if not provided.
deconvolve_iter:
type: int?
doc: Number of iterations to perform for deconvolution. High values remove more noise while lower values remove less. The value 15 will work for most datasets unless image is very noisy. Will not be run if not provided.
deconvolve_sigma:
type: int?
doc: Sigma value for deconvolution. Should be approximately the expected spot size.
low_sigma:
type: int?
doc: Sigma value for low pass gaussian filter. Will not be run if not provided.
rolling_radius:
type: int?
doc: Radius for rolling ball background subtraction. Larger values lead to increased intensity evening effect. The value of 3 will work for most datasets. Will not be run if not provided.
match_histogram:
type: boolean?
doc: If true, histograms will be equalized.
tophat_radius:
type: int?
doc: Radius for white top hat filter. Should be slightly larger than the expected spot radius. Will not be run if not provided.
# starfishRunner
exp_loc:
type: Directory?
doc: Location of directory containing starfish experiment.json file. Only used when both skip_formatting and skip_processing are true.
use_ref_img:
type: boolean?
doc: Whether to generate a reference image and use it alongside spot detection.
is_volume:
type: boolean?
doc: Whether to treat the zplanes as a 3D image.
default: False
rescale:
type: boolean?
doc: Whether to rescale images before running decoding.
not_filtered_results:
type: boolean?
doc: Pipeline will not remove genes that do not match a target and do not meet criteria.
n_processes:
type: int?
doc: If provided, the number of processes that will be spawned for processing. Otherwise, the maximum number of available CPUs will be used.
scatter_into_n:
type: int?
doc: If provided, the step to run decoding will be split into n batches, where each batch is (FOV count/n) FOVs big.
decoding_blob:
- 'null'
- type: record
name: dummy
fields:
dummy:
type: string?
doc: Added to prevent cli parsing of the decoding_blob record.
- type: record
name: blob
fields:
min_sigma:
type: float[]?
doc: Minimum sigma tuple to be passed to blob detector
max_sigma:
type: float[]?
doc: Maximum sigma tuple to be passed to blob detector
num_sigma:
type: int?
doc: The number of sigma values to be tested, passed to blob detector
threshold:
type: float?
doc: Threshold of blob detection
overlap:
type: float?
doc: Amount of overlap allowed between blobs, passed to blob detector
detector_method:
type: string?
doc: Name of the scikit-image spot detection method to use
composite_decode:
type: boolean?
doc: Whether to composite all FOVs into one image, typically for PoSTcode decoding.
composite_pmin:
type: float?
doc: pmin value for clip and scale of composite image.
composite_pmax:
type: float?
doc: pmax value for clip and scale of composite image.
decode_method:
type: string
doc: Method name for spot decoding. Refer to starfish documentation.
decoder:
type:
- type: record
name: metric_distance
fields:
trace_building_strategy:
type: string
doc: Which tracing strategy to use. See starfish docs.
max_distance:
type: float
doc: Maximum distance between spots.
min_intensity:
type: float
doc: Minimum intensity of spots.
pnorm:
type: int?
doc: Which Minkowski p-norm to use. 1 is the sum-of-absolute-values “Manhattan” distance 2 is the usual Euclidean distance infinity is the maximum-coordinate-difference distance A finite large p may cause a ValueError if overflow can occur.
norm_order:
type: int?
doc: Refer to starfish documentation for metric_distance
anchor_round:
type: int?
doc: Anchor round for comparison.
search_radius:
type: float?
doc: Distance to search for matching spots.
return_original_intensities:
type: boolean?
doc: Return original intensities instead of normalized ones.
- type: record
name: per_round_max
fields:
trace_building_strategy:
type: string
doc: Which tracing strategy to use. See starfish docs.
anchor_round:
type: int?
doc: Round to refer to. Required for nearest_neighbor.
search_radius:
type: float?
doc: Distance to search for matching spots.
- type: record
name: check_all
fields:
search_radius:
type: float?
doc: Distance to search for matching spots.
error_rounds:
type: int?
doc: Maximum hamming distance a barcode can be from its target and still be uniquely identified.
mode:
type: string?
doc: Accuracy mode to run in. Can be 'high', 'med', or 'low'.
physical_coords:
type: boolean?
doc: Whether to use physical coordinates or pixel coordinates
decoding_pixel:
- 'null'
- type: record
name: dummy
fields:
dummy:
type: string?
doc: Added to prevent cli parsing of decoding_pixel parameters.
- type: record
name: pixel
fields:
pnorm:
type: int?
doc: Which Minkowski p-norm to use. 1 is the sum-of-absolute-values “Manhattan” distance 2 is the usual Euclidean distance infinity is the maximum-coordinate-difference distance A finite large p may cause a ValueError if overflow can occur.
distance_threshold:
type: float
doc: Spots whose codewords are more than this metric distance from an expected code are filtered
magnitude_threshold:
type: float
doc: spots with intensity less than this value are filtered.
min_area:
type: int?
doc: Spots with total area less than this value are filtered. Defaults to 2.
max_area:
type: int?
doc: Spots with total area greater than this value are filtered. Defaults to `np.inf`.
norm_order:
type: int?
doc: Order of L_p norm to apply to intensities and codes when using metric_decode to pair each intensities to its closest target (default = 2)
# segmentation
skip_seg:
type: boolean?
doc: If true, segmentation (and QC) will be skipped.
## cellpose-specific vars
run_cellpose:
type: boolean?
doc: If true, cellpose will be run.
use_mrna:
type: boolean?
doc: If true, mrna data will be used in cellpose calculations.
aux_views:
type: string[]?
doc: The views to use for cellpose segmentation.
pretrained_model_str:
type: string?
doc: Cellpose-provided model to use.
pretrained_model_dir:
type: File?
doc: Manually trained cellpose model to use.
diameter:
type: float?
doc: Expected diameter of cells. Should be 0 if a custom model is used.
flow_threshold:
type: float?
doc: threshold for filtering cell segmentations (increasing this will filter out lower confidence segmentations), range is 0 to infinity
stitch_threshold:
type: float?
doc: threshold for stitching together segmentations that occur at the same xy location but in adjacent z slices, range is 0 to 1. This should only be used when the image is 3D.
cellprob_threshold:
type: float?
doc: determines the extent of the segmentations (0 is the default more negative values result in larger cells, more positive values result in smaller cells), range is -6 to 6.
border_buffer:
type: int?
doc: If not None, removes cytoplasms whose nuclei lie within the given distance from the border.
label_exp_size:
type: int?
doc: Pixel size labels are dilated by in final step. Helpful for closing small holes that are common from thresholding but can also cause cell boundaries to exceed their true boundaries if set too high. Label dilation respects label borders and does not mix labels.
min_allowed_size:
type: int?
doc: minimum size for a cell (in pixels)
max_allowed_size:
type: int?
doc: maximum size for a cell (in pixels)
## built-in segmentation methods
aux_name:
type: string?
doc: The name of the aux view to look at in the experiment file for image segmentation.
binary_mask:
- 'null'
- type: record
name: roi_set
fields:
roi_set:
type: Directory
doc: Directory of RoiSet.zip for each fov, from fiji segmentation
file_formats:
type: string
doc: Layout for name of each RoiSet.zip, per fov. Will be formatted with String.format([fov index]).
- type: record
name: labeled_image
fields:
labeled_image:
type: Directory
doc: Directory of labeled images with image segmentation data, such as from ilastik classification.
file_formats_labeled:
type: string
doc: Layout for name of each labelled image. Will be formatted with String.format([fov index])
- type: record
name: basic_watershed
fields:
img_threshold:
type: float
doc: Global threshold value for images
min_dist:
type: int
doc: minimum distance (pixels) between distance transformed peaks
min_allowed_size:
type: int
doc: minimum size for a cell (in pixels)
max_allowed_size:
type: int
doc: maxiumum size for a cell (in pixels)
masking_radius:
type: int
doc: Radius for white tophat noise filter
- type: record
name: density_based
fields:
nuclei_view:
type: string
doc: Name of the auxillary view with nuclei data
cyto_seg:
type: boolean
doc: If true, the cytoplasm will be segmented
correct_seg:
type: boolean
doc: If true, suspected nuclei/cytoplasms that overlap will be removed.
border_buffer:
type: int
doc: If not None, removes cytoplasms whose nuclei lie within the given distance from the border.
area_thresh:
type: float
doc: Threshold used when determining if an object is one nucleus or two or more overlapping nuclei. Objects whose ratio of convex hull area to normal area are above this threshold are removed if the option to remove overlapping nuclei is set.
thresh_block_size:
type: int
doc: Size of structuring element for local thresholding of nuclei. If nuclei interiors aren't passing threshold, increase this value, if too much non-nuclei is passing threshold, lower it.
watershed_footprint_size:
type: int
doc: Size of structuring element for watershed segmentation. Larger values will segment the nuclei into larger objects and smaller values will result in smaller objects. Adjust according to nucleus size.
label_exp_size:
type: int
doc: Pixel size labels are dilated by in final step. Helpful for closing small holes that are common from thresholding but can also cause cell boundaries to exceed their true boundaries if set too high. Label dilation respects label borders and does not mix labels.
# QC
run_baysor:
type: boolean?
doc: If true, the baysor step will be run.
default: False
skip_qc:
type: boolean?
doc: If true, QC will not be run.
default: False
find_ripley:
type: boolean?
doc: If true, will run ripley K estimates to find spatial density measures. Can be slow.
default: False
save_pdf:
type: boolean?
doc: If true, will save graphical output to a pdf.
default: True
# Numbered prefixes order the output directories by pipeline stage.
outputs:
  1_Pseudosort:
    type: Directory
    outputSource: sorter/pseudosorted_dir
  2_tx_converted:
    type: Directory
    outputSource: spaceTxConversion/spaceTx_converted
  3_Processed:
    type: Directory
    outputSource: processing/processed_exp
  4_Decoded:
    type: Directory
    outputSource: starfishRunner/decoded
  5A_cellpose_input:
    type: Directory
    outputSource: cellpose/cellpose_input
  5B_cellpose_output:
    type: Directory
    outputSource: cellpose/cellpose_output
  5C_cellpose_filtered:
    type: Directory
    outputSource: cellpose/cellpose_filtered
  5_Segmented:
    type: Directory
    outputSource: segmentation/segmented
  6_Baysor:
    type: Directory
    outputSource: baysorStaged/baysor
  7_QC:
    type: Directory
    outputSource: qc/qc_metrics
steps:
  # Inline tool: cats the parameter schema baked into the docker image to stdout.
  read_schema:
    run:
      class: CommandLineTool
      baseCommand: cat
      requirements:
        DockerRequirement:
          dockerPull: hubmap/starfish-custom:latest
        ResourceRequirement:
          ramMin: 1000
          tmpdirMin: 1000
          outdirMin: 1000
      inputs:
        schema:
          type: string
          inputBinding:
            position: 1
      outputs:
        data:
          type: stdout
    in:
      schema:
        valueFrom: "/opt/pipeline.json"
    out: [data]
stage:
run: steps/inputParser.cwl
in:
datafile: parameter_json
schema: read_schema/data
out: [run_baysor, aux_views, skip_formatting, skip_processing, register_aux_view, fov_positioning_x_locs, fov_positioning_x_shape, fov_positioning_x_voxel, fov_positioning_y_locs, fov_positioning_y_shape, fov_positioning_y_voxel, fov_positioning_z_locs, fov_positioning_z_shape, fov_positioning_z_voxel, run_cellpose, add_blanks, skip_seg, skip_qc]
when: $(inputs.datafile != null)
sizer:
run: steps/fileSizer.cwl
in:
exp_loc: exp_loc
input_dir: input_dir
tiffs: tiffs
example_dir:
valueFrom: |
${
if (inputs.exp_loc !== null) {
return inputs.exp_loc;
} else if(inputs.input_dir !== null) {
return inputs.input_dir;
} else {
return inputs.tiffs;
}
}
out: [dir_size]
sorter:
run: steps/sorter.cwl
in:
dir_size: sizer/dir_size
channel_yml: channel_yml
cycle_yml: cycle_yml
parameter_json: parameter_json
input_dir: tiffs
codebook:
source: [codebook, codebook_csv, codebook_json]
linkMerge: merge_flattened
valueFrom: |
${
if(self[0]){
return self[0];
} else if(self[1]){
return {csv: self[1]};
} else {
return {json: self[2]};
}
}
round_count: round_count
fov_count: fov_count
round_offset: round_offset
fov_offset: fov_offset
channel_offset: channel_offset
file_format: file_format
file_vars: file_vars
cache_read_order: cache_read_order
aux_tilesets: aux_tilesets
skip_formatting:
source: [stage/skip_formatting, skip_formatting]
valueFrom: |
${
if(self[0] || self[1]){
return true;
} else {
return false;
};
}
when: $(inputs.channel_yml != null && !inputs.skip_formatting)
out: [pseudosorted_dir]
stagedSorted:
run: steps/psortedDefaultParams.cwl
in:
channel_yml: channel_yml
exp_dir: sorter/pseudosorted_dir
parameter_json: parameter_json
aux_names:
source: aux_tilesets
valueFrom: |
${
if(self){
return self.aux_names;
} else {
return null;
}
}
cache_read_order: cache_read_order
channel_count: channel_count
aux_cache_read_order:
source: aux_tilesets
valueFrom: |
${
if(self) {
return self.aux_cache_read_order;
} else {
return null;
}
}
skip_formatting:
source: [stage/skip_formatting, skip_formatting]
valueFrom: |
${
if(self[0] || self[1]){
return true;
} else {
return false;
};
}
when: $(inputs.channel_yml != null && !inputs.skip_formatting)
out: [codebook, round_count, fov_count, channel_count, zplane_count, round_offset, fov_offset, channel_offset, zplane_offset, file_format, file_vars, cache_read_order, aux_names, aux_file_formats, aux_file_vars, aux_cache_read_order, aux_channel_count, aux_channel_slope, aux_channel_intercept]
spaceTxConversion:
run: steps/spaceTxConversion.cwl
in:
dir_size: sizer/dir_size
tiffs:
source: [sorter/pseudosorted_dir, tiffs]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
codebook:
source: [stagedSorted/codebook, codebook, codebook_csv, codebook_json]
linkMerge: merge_flattened
valueFrom: |
${
if(self[0]){
return {json: self[0]};
} else if(self[1]) {
return self[1];
} else if(self[2]) {
return {csv: self[2]};
} else {
return {json: self[3]};
}
}
parameter_json:
source: [parameter_json, sorter/pseudosorted_dir]
valueFrom: |
${
if(self[1]){
return null;
} else {
return self[0];
}
}
data_org_file: data_org_file
round_count:
source: [stagedSorted/round_count, round_count]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
zplane_count:
source: [stagedSorted/zplane_count, zplane_count]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
channel_count:
source: [stagedSorted/channel_count, channel_count]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
fov_count:
source: [stagedSorted/fov_count, fov_count]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
round_offset:
source: [stagedSorted/round_offset, round_offset]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
fov_offset:
source: [stagedSorted/fov_offset, fov_offset]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
channel_offset:
source: [stagedSorted/channel_offset, channel_offset]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
file_format:
source: [stagedSorted/file_format, file_format]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
file_vars:
source: [stagedSorted/file_vars, file_vars]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
cache_read_order:
source: [stagedSorted/cache_read_order, cache_read_order]
valueFrom: |
${
if(self[0]){
return self[0];
} else if (self[1]){
return self[1];
} else {
return null;
}
}
aux_tilesets:
source: [aux_tilesets, stagedSorted/aux_names, stagedSorted/aux_file_formats, stagedSorted/aux_file_vars, stagedSorted/aux_cache_read_order, stagedSorted/aux_channel_count, stagedSorted/aux_channel_slope, stagedSorted/aux_channel_intercept]
valueFrom: |
${
if(!self[1] && self[0]){
return {
aux_names: self[0].aux_names,
aux_file_formats: self[0].aux_file_formats,
aux_file_vars: self[0].aux_file_vars,
aux_cache_read_order: self[0].aux_cache_read_order,
aux_channel_count: self[0].aux_channel_count,
aux_channel_slope: self[0].aux_channel_slope,
aux_channel_intercept: self[0].aux_channel_intercept
};
} else if(self[1]) {
var count = self[5];
if(self[0] && self[0].aux_channel_count){
count = self[0].aux_channel_count;
}
return {
aux_names: self[1],
aux_file_formats: self[2],
aux_file_vars: self[3],
aux_cache_read_order: self[4],
aux_channel_count: count,
aux_channel_slope: self[6],
aux_channel_intercept: self[7]
};
} else {
return null;
}
}
fov_positioning:
source: [fov_positioning, stage/fov_positioning_x_locs, stage/fov_positioning_x_shape, stage/fov_positioning_x_voxel, stage/fov_positioning_y_locs, stage/fov_positioning_y_shape, stage/fov_positioning_y_voxel, stage/fov_positioning_z_locs, stage/fov_positioning_z_shape, stage/fov_positioning_z_voxel, locs_json]
valueFrom: |
${
if(self[1]) {
return {
x_locs: self[1],
x_shape: self[2],
x_voxel: self[3],
y_locs: self[4],
y_shape: self[5],
y_voxel: self[6],
z_locs: self[7],
z_shape: self[8],
z_voxel: self[9]
};
} else if (self[0]) {
return self[0];
} else if (self[10]) {
return {"locs": self[10]};
} else {
return null;
}
}
add_blanks:
source: [add_blanks, stage/add_blanks]
valueFrom: |
${
if(self[0]){
return self[0];
} else if(self[1]) {
return self[1];
} else {
return false;
}
}
skip_formatting:
source: [stage/skip_formatting, skip_formatting]
valueFrom: |
${
if(self[0] || self[1]){
return true;
} else {
return false;
};
}