-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmake_timeline.py
executable file
·1255 lines (1062 loc) · 40.5 KB
/
make_timeline.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python
"""
Generate a timeline plot and associated JSON data for animated version.
This plot features the predicted ACIS attenuated fluence based on current ACIS orbital
fluence, 2hr average, and grating status. It also shows DSN comms, radiation zone
passages, instrument configuration.
Testing
=======
This section documents regression testing of the Replan Central timeline plot code. For
full testing of Replan Central including the ``arc.pl`` Perl script and other Python
scripts see: https://github.com/sot/arc/wiki/Set-up-test-Replan-Central.
First some setup which applies to testing both on HEAD and local::
cd ~/git/arc
rm -rf t_now
COMMIT=`git rev-parse --short HEAD`
PR=pr86 # or whatever
mkdir -p t_now/$COMMIT
mkdir -p t_now/flight
HEAD
-----
Testing on HEAD does an apples-to-apples comparison of the branch to the current flight
code, running both in "flight" mode::
# Current flight version of make_timeline.py looks for data in the output directory.
ln -s /proj/sot/ska/data/arc3/*.h5 t_now/flight/
ln -s /proj/sot/ska/data/arc3/ACE_hourly_avg.npy t_now/flight/
# Run PR branch version in "flight" mode (no --test), being explicit about data
# directory. This version looks for data resources in fixed locations, not in
# `data-dir`, so starting with an empty directory is fine.
python make_timeline.py --data-dir=t_now/$COMMIT
# Run flight version of make_timeline.py in a directory hidden from the git repo.
cd t_now
python /proj/sot/ska/share/arc3/make_timeline.py --data-dir=flight
cd ..
rsync -av t_now/ /proj/sot/ska/www/ASPECT_ICXC/test_review_outputs/arc/${PR}/t_now/
diff t_now/{flight,$COMMIT}/timeline_states.js
Local
-----
Testing on a local machine (Mac) requires syncing the data files from kady. This tests
that the branch produces the same output as the currently running flight arc cron job.
For testing on a local machine, the most straightforward option is syncing from HEAD. Do
all this in one copy/paste to reduce the chance that the cron job has run between
commands. The ``--test-get-web`` option in the last command is to grab the ACIS fluence,
ACE rates, and DSN comms from the web.
rsync -av kady:/proj/sot/ska/data/arc3/hrc_shield.h5 $SKA/data/arc3/
rsync -av kady:/proj/sot/ska/data/arc3/ACE.h5 $SKA/data/arc3/
rsync -av kady:/proj/sot/ska/data/arc3/GOES_X.h5 $SKA/data/arc3/
rsync -av kady:/proj/sot/ska/data/arc3/ACE_hourly_avg.npy $SKA/data/arc3/
rsync -av kady:/proj/sot/ska/www/ASPECT/arc3/ t_now/$COMMIT/
rsync -av kady:/proj/sot/ska/www/ASPECT/arc3/ t_now/flight/
ln -s $SKA/data/arc3/ACE_hourly_avg.npy t_now/$COMMIT/
ln -s $SKA/data/arc3/*.h5 t_now/$COMMIT/
python make_timeline.py --data-dir=t_now/$COMMIT --test-get-web
Get the flight run date and convert that to a full date-format date (e.g. "317/1211z"
=> "2024:317:12:11:00")::
DATE_NOW=`python utils/get_date_now.py t_now/flight`
Run the script with the test option::
python make_timeline.py --test --data-dir=t_now/$COMMIT --date-now=$DATE_NOW
To view the output, open the directory in a browser::
open t_now/$COMMIT/index.html
open t_now/flight/index.html
Compare the timeline_states.js files::
python utils/convert_states_to_yaml.py t_now/$COMMIT
python utils/convert_states_to_yaml.py t_now/flight
diff t_now/{flight,$COMMIT}/timeline_states.yaml
"""
import argparse
import functools
import io
import json
import os
import re
import sys
import warnings
from pathlib import Path
os.environ["MPLBACKEND"] = "Agg"
import astropy.units as u
import kadi.commands.states as kadi_states
import matplotlib.cbook
import matplotlib.patches
import matplotlib.pyplot as plt
import numpy as np
import ska_numpy
import tables
import yaml
from astropy.table import Table
from cxotime import CxoTime, CxoTimeLike
from kadi import events, occweb
from ska_matplotlib import lineid_plot, plot_cxctime
import calc_fluence_dist as cfd
warnings.filterwarnings("ignore", category=matplotlib.MatplotlibDeprecationWarning)

# Sentinel value marking bad or missing ACE P3 flux data (see get_avg_flux and
# get_p3_test_vals).
P3_BAD = -100000

# Axes rectangle [left, bottom, width, height] shared by the main timeline axes
# and the log-scale overlay axes (draw_log_scale_axes) so the two align exactly.
AXES_LOC = [0.08, 0.15, 0.83, 0.6]

SKA = Path(os.environ["SKA"])
# Root directory for arc3 data products (HDF5 rate histories, hourly averages).
DATA_ARC3 = SKA / "data" / "arc3"

# CSV of DSN schedule modifications ("available comms") on OCCweb.
COMMS_AVAIL_URL = (
    "https://occweb.cfa.harvard.edu/mission/MissionPlanning/DSN/DSN_Modifications.csv"
)

# Define HTML to support showing available comms as a table that is hidden by default.
# The table content is inserted between the two. In a nicer world this would be in a
# Jinja template, but let's keep the footprint small.
COMMS_AVAIL_HTML_HEADER = """
<script>
function toggleCommsAvail() {
var x = document.getElementById("timeline-comms-available");
var button = document.querySelector("button");
if (x.style.display === "none") {
x.style.display = "block";
button.textContent = "Hide available comms";
} else {
x.style.display = "none";
button.textContent = "Show available comms";
}
};
</script>
<br>
<div style="text-align: center;">
<button onclick="toggleCommsAvail()">Show available comms</button>
</div>
<div id="timeline-comms-available" style="display: none; font-family: monospace;">
<br>
<div style="display: flex; justify-content: center;">
"""

COMMS_AVAIL_HTML_FOOTER = """
</div>
</div>
"""
def cxc2pd(times: CxoTimeLike) -> float | np.ndarray:
    """
    Convert CXC time(s) to matplotlib plot date(s).

    This is a more general replacement for the old
    ``ska_matplotlib.cxctime2plotdate`` function, accepting any CxoTimeLike
    input (scalar or array).
    """
    cxo_times = CxoTime(times)
    return cxo_times.plot_date
def get_parser():
    """Return the command-line argument parser for the timeline script.

    Returns
    -------
    argparse.ArgumentParser
        Parser with options for data directory, prediction span/step, test
        scenarios, and time override.
    """
    parser = argparse.ArgumentParser(
        description="Make a timeline plot and associate table javascript"
    )
    parser.add_argument(
        "--data-dir",
        default="t_now",
        help="Data directory (default=t_now)",
    )
    parser.add_argument(
        "--hours",
        default=72.0,
        type=float,
        help="Hours to predict (default=72)",
    )
    parser.add_argument(
        "--dt",
        default=300.0,
        type=float,
        help="Prediction time step (secs, default=300)",
    )
    parser.add_argument(
        "--max-slope-samples",
        type=int,
        # Fixed typo: help text previously had an unclosed paren "(default=None".
        help="Max number of samples when filtering by slope (default=None)",
    )
    parser.add_argument(
        "--min-flux-samples",
        default=100,
        type=int,
        help="Minimum number of samples when filtering by flux (default=100)",
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help=(
            "Use test data in data_dir (default=False). This option adjusts times in "
            "ACE, GOES, and HRC data to pretend it is up to date (if needed)."
        ),
    )
    parser.add_argument(
        "--test-scenario",
        type=int,
        help="Name of a scenario for testing missing P3 data",
    )
    parser.add_argument(
        "--test-get-web",
        action="store_true",
        help=(
            "Grab ACIS fluence, ACE rates and DSN comms from web and store in "
            "data_dir. This is a one-time operation to get the data for testing."
        ),
    )
    parser.add_argument(
        "--date-now",
        type=str,
        help="Override the current time for testing (default=now)",
    )
    return parser
def arc_data_file(
data_dir_flight: str | Path,
filename: str,
data_dir_test: str | Path | None = None,
test: bool = False,
) -> Path:
"""Get a data file path from flight data directory or test directory.
This is intended to be used as a functools.partial function, see below.
"""
file_dir = Path(data_dir_test) if test else Path(data_dir_flight)
return file_dir / filename
# Define file path functions for this module.
# Each partial binds arc_data_file's (data_dir_flight, filename) arguments,
# leaving (data_dir_test, test) for the caller, e.g. ``ace_h5_file(data_dir, test)``.
acis_fluence_file = functools.partial(
    arc_data_file, "/proj/web-cxc/htdocs/acis/Fluence", "current.dat"
)
ace_rates_file = functools.partial(arc_data_file, "/data/mta4/www", "ace.html")
dsn_comms_file = functools.partial(
    arc_data_file, SKA / "data" / "dsn_summary", "dsn_summary.yaml"
)
ace_hourly_avg_file = functools.partial(arc_data_file, DATA_ARC3, "ACE_hourly_avg.npy")
goes_x_h5_file = functools.partial(arc_data_file, DATA_ARC3, "GOES_X.h5")
ace_h5_file = functools.partial(arc_data_file, DATA_ARC3, "ACE.h5")
hrc_h5_file = functools.partial(arc_data_file, DATA_ARC3, "hrc_shield.h5")
comms_avail_file = functools.partial(arc_data_file, DATA_ARC3, "comms_avail.html")
def get_web_data(data_dir):
    """Get ACIS fluence, ACE rates, and DSN comms from CXC web pages.

    Output files are placed in ``data_dir`` (via the test=True path helpers).
    """
    import urllib.request

    sources = [
        ("/acis/Fluence/current.dat", acis_fluence_file),
        ("/mta/ace.html", ace_rates_file),
        ("/mta/ASPECT/dsn_summary/dsn_summary.yaml", dsn_comms_file),
    ]
    for url_path, path_func in sources:
        local_path = path_func(data_dir, test=True)
        urllib.request.urlretrieve("https://cxc.cfa.harvard.edu" + url_path, local_path)
def get_fluence(filename):
    """
    Get the current ACIS attenuated fluence (parse stuff below)::

      TABLE 2: ACIS FLUX AND FLUENCE BASED ON ACE DATA
      Latest valid ACIS flux and fluence data... 111...
      # UT Date Time Julian of the --- Electron keV --- -------------------- Pr...
      # YR MO DA HHMM Day Secs 38-53 175-315 56-78 112-187 3...
      #----------------------------------------------------------------------...
      2012 9 4 1915 56174 69300 5.54e+03 2.90e+01 5.92e+03 1.88e+03 4....
      ACIS Fluence data...Start DOY,SOD
      2012 9 4 1923 248 38580 1.73e+08 9.15e+05 1.39e+08 4.94e+07 1...

    Parameters
    ----------
    filename : str
        Path to the ACIS ops "current.dat" fluence file.

    Returns
    -------
    start : CxoTime
        Time of the latest valid flux measurement (from MJD day + secs).
    p3_fluence : float
        P3 fluence value (column 10 of the last line).
    """
    # Use a context manager so the file handle is closed promptly (the original
    # code leaked the handle until garbage collection).
    with open(filename, "r") as fh:
        lines = fh.readlines()
    vals = lines[-3].split()
    mjd = float(vals[4]) + float(vals[5]) / 86400.0
    start = CxoTime(mjd, format="mjd")
    vals = lines[-1].split()
    p3_fluence = float(vals[9])
    return start, p3_fluence
def get_comms_avail(start: CxoTime, stop: CxoTime) -> Table | None:
    """Get the available DSN comms from OCCweb

    Parameters
    ----------
    start : CxoTime
        Start time for the available DSN comms table
    stop : CxoTime
        Stop time for the available DSN comms table

    Returns
    -------
    dat : Table | None
        Table of available DSN comms, or None if URL could not be read
    """
    try:
        text = occweb.get_occweb_page(COMMS_AVAIL_URL)
        if os.environ.get("ARC_TEST_SCENARIO") == "avail-comms-read-fail":
            # Test hook: simulate a failed read of the OCCweb page.
            raise Exception
    except Exception:
        # Return None, which gets handled in the downstream processing with a warning
        # on the web page that the URL could not be read.
        return None
    # Parse the CSV text; "NaN" cells are treated as masked with fill value "0".
    dat = Table.read(text, format="ascii", fill_values=[("NaN", "0")])
    # Deleted comms are ones that have been superseded by combined comms in this table.
    # Keep comms that overlap [start, stop] (date strings compare lexically) and
    # are not of type "Deleted".
    datestart = start.date
    datestop = stop.date
    ok = (
        (dat["avail_bot"] < datestop)
        & (dat["avail_eot"] > datestart)
        & (dat["type"] != "Deleted")
    )
    dat = dat[ok]
    # Test hook: pretend no comms are available in the range.
    if os.environ.get("ARC_TEST_SCENARIO") == "avail-comms-zero-len":
        dat = dat[0:0]
    return dat["station", "avail_bot", "avail_eot", "avail_soa", "avail_eoa"]
def get_avg_flux(
filename,
data_dir: str | Path | None = None,
test: bool = False,
) -> float:
"""
# Get the ACE 2 hour average flux (parse stuff below)
DE1 DE4 P2 P3 P3S..
38-53 175-315 47-68 115-195 11..
AVERAGE 28126.087 147.783 32152.174 10211.739 16480..
MINIMUM 26400.000 137.000 29400.000 9310.000 15260..
FLUENCE 2.0251e+08 1.0640e+06 2.3150e+08 7.3525e+07 1.1866..
"""
lines = [line for line in open(filename, "r") if line.startswith("AVERAGE ")]
if len(lines) != 1:
print(
(
"WARNING: {} file contains {} lines that start with "
"AVERAGE (expect one)".format(
ace_rates_file(data_dir, test), len(lines)
)
)
)
p3_avg_flux = P3_BAD
else:
p3_avg_flux = float(lines[0].split()[4])
return p3_avg_flux
def get_radzones():
    """
    Construct a list of (start, stop) pairs for complete radiation zones
    using kadi events (zones starting within the last 5 days and later).
    """
    recent = CxoTime() - 5 * u.day
    zones = events.rad_zones.filter(start=recent, stop=None)
    return [(zone.start, zone.stop) for zone in zones]
def get_comms(
    data_dir: str | Path | None = None,
    test: bool = False,
) -> list:
    """
    Get the list of comm passes from the DSN summary YAML file.

    Parameters
    ----------
    data_dir, test
        Passed to ``dsn_comms_file`` to locate the file.

    Returns
    -------
    list
        Parsed YAML content (list of comm pass dicts).
    """
    # Context manager closes the file (the original leaked the handle).
    with open(dsn_comms_file(data_dir, test), "r") as fh:
        dat = yaml.safe_load(fh)
    return dat
def zero_fluence_at_radzone(times, fluence, radzones):
    """
    Reset the fluence estimate to zero at the start of each radzone.

    For the ``fluence`` array sampled at ``times``, subtract the fluence value
    at the first sample inside each radzone from that sample onward, so the
    curve restarts at zero there. Operates on ``fluence`` in place.
    """
    for radzone in radzones:
        zone_start, zone_stop = CxoTime(radzone).secs
        in_zone = (times > zone_start) & (times <= zone_stop)
        idxs = np.flatnonzero(in_zone)
        if len(idxs) > 0:
            first = idxs[0]
            fluence[first:] -= fluence[first]
def calc_fluence(times, fluence0, rates, states):
    """
    Integrate predicted P3 rates into attenuated fluence.

    Starting from ``fluence0`` (the current ACIS ops estimate), attenuate the
    per-step ``rates`` according to the commanded ``states``: rates are zeroed
    while the SIM is below 40000 (not an ACIS position), divided by 5 while
    HETG is inserted, and divided by 2 while LETG is inserted. ``rates`` is
    modified in place. Returns the cumulative fluence in units of 1e9.
    """
    for state in states:
        in_state = (times > state["tstart"]) & (times < state["tstop"])
        if state["simpos"] < 40000:
            rates[in_state] = 0.0
        if state["hetg"] == "INSR":
            rates[in_state] = rates[in_state] / 5.0
        if state["letg"] == "INSR":
            rates[in_state] = rates[in_state] / 2.0
    return (fluence0 + np.cumsum(rates)) / 1e9
def get_h5_data(h5_file, col_time, col_values, start, stop, test=False):
    """
    Read (times, values) columns from an HDF5 file, clipped to (start, stop].
    """
    tstart = CxoTime(start).secs
    tstop = CxoTime(stop).secs
    with tables.open_file(h5_file) as h5:
        times = h5.root.data.col(col_time)
        values = h5.root.data.col(col_values)
    # Test data files are commonly stale; when more than an hour old, shift the
    # times forward so the data appears current.
    if test:
        lag = tstop - times[-1]
        if lag > 3600:
            times = times + lag
    in_range = (tstart < times) & (times <= tstop)
    return times[in_range], values[in_range]
def get_ace_p3(
    tstart: float,
    tstop: float,
    data_dir: str | Path | None = None,
    test: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Get the historical ACE P3 rates over (tstart, tstop].
    """
    h5_path = ace_h5_file(data_dir, test)
    return get_h5_data(h5_path, "time", "p3", tstart, tstop, test)
def get_goes_x(
    tstart: float,
    tstop: float,
    data_dir: str | Path | None = None,
    test: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Get recent GOES 1-8 angstrom X-ray rates.

    Returns
    -------
    times : np.ndarray
        Times of X-ray data
    xray_long : np.ndarray
        X-ray flux
    """
    h5_path = goes_x_h5_file(data_dir, test)
    return get_h5_data(h5_path, "time", "long", tstart, tstop, test)
def get_p3_test_vals(scenario, p3_times, p3s, p3_avg, p3_fluence):
    """Inject canned P3 values for a numbered test scenario.

    ``p3s`` is modified in place. Unknown scenario numbers return the inputs
    unchanged. Returns (p3s, p3_avg, p3_fluence).
    """
    if scenario == 1:
        # ACE unavailable for the last 8 hours. No P3 avg from MTA.
        print("Running test scenario 1")
        p3s[:] = 100.0
        outage = p3_times[-1] - p3_times < 3600 * 8
        p3s[outage] = P3_BAD
        p3_avg = P3_BAD
        p3_fluence = 0.3e9
    elif scenario == 2:
        # ACE data flow resumes but MTA does not report an average value.
        # Expect ACE fluence to show as red because no P3 avg is available
        # for the fluence calculation.
        print("Running test scenario 2")
        p3s[:] = 5000
        hrs_old = (p3_times[-1] - p3_times) / 3600.0
        outage = (hrs_old < 8) & (hrs_old > 2.5)
        p3s[outage] = P3_BAD
        p3_avg = P3_BAD
        p3_fluence = 0.3e9
    elif scenario == 3:
        # ACE data flow resumes and MTA reports an average value, and there
        # is now enough data for a slope and fluence quantiles.
        print("Running test scenario 3")
        p3s[:] = 5000
        hrs_old = (p3_times[-1] - p3_times) / 3600.0
        outage = (hrs_old < 8) & (hrs_old > 2.5)
        p3_avg = 5000
        p3s[outage] = P3_BAD
        p3_fluence = 0.0e9
    elif scenario == 4:
        # ACE data flow resumes and MTA reports an average value, but not
        # enough data for a slope and fluence quantiles.
        print("Running test scenario 4")
        p3s[:] = 5000
        hrs_old = (p3_times[-1] - p3_times) / 3600.0
        outage = (hrs_old < 8) & (hrs_old > 0.5)
        p3_avg = 5000
        p3s[outage] = P3_BAD
        p3_fluence = 0.0e9
    elif scenario == 5:
        # ACE completely unavailable during period. No P3 avg from MTA.
        print("Running test scenario 5")
        p3s[:] = P3_BAD
        p3_avg = P3_BAD
        p3_fluence = 0.3e9
    elif scenario == 6:
        # Random ACE outages.
        print("Running test scenario 6")
        outage = np.random.uniform(size=len(p3s)) < 0.05
        p3s[:] = 1000.0
        p3s[outage] = P3_BAD
        p3_avg = 1000.0
        p3_fluence = 0.3e9
    return p3s, p3_avg, p3_fluence
def get_hrc(
    tstart,
    tstop,
    data_dir: str | Path | None = None,
    test: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Get the historical HRC shield proxy rates over (tstart, tstop].

    The stored values are scaled by 256 before being returned.
    """
    h5_path = hrc_h5_file(data_dir, test)
    times, shield_vals = get_h5_data(h5_path, "time", "hrc_shield", tstart, tstop, test)
    return times, shield_vals * 256.0
def plot_multi_line(x, y, z, bins, colors, ax):
    """
    Plot a line whose color varies with the ``z`` value.

    See:
    http://matplotlib.sourceforge.net/examples/pylab_examples/multicolored_line.html
    """
    from matplotlib.collections import LineCollection
    from matplotlib.colors import BoundaryNorm, ListedColormap

    # When bins has the same length as colors, infer that the values are bin
    # centers and derive the edges (halfway between centers, with unit padding
    # at the ends).
    if len(bins) == len(colors):
        center_vals = np.array(bins, dtype=float)
        mids = (center_vals[1:] + center_vals[:-1]) / 2.0
        bins = np.concatenate([[mids[0] - 1], mids, [mids[-1] + 1]])
    cmap = ListedColormap(colors)
    norm = BoundaryNorm(bins, cmap.N)
    pts = np.array([x, y]).T.reshape(-1, 1, 2)
    segs = np.concatenate([pts[:-1], pts[1:]], axis=1)
    line_coll = LineCollection(segs, cmap=cmap, norm=norm)
    line_coll.set_array(z)
    line_coll.set_linewidth(3)
    ax.add_collection(line_coll)
def get_p3_slope(p3_times, p3_vals):
    """
    Compute the slope (log10(p3) per hour) over the last 6 hours of ACE P3 data.

    Returns None when there are fewer than 5 good (positive) points in the
    window or the good points span 2 hours or less.
    """
    # Points within 6 hrs of last available data, with good (positive) values.
    recent = (p3_times[-1] - p3_times) < 6 * 3600
    good = recent & (p3_vals > 0)
    if np.sum(good) <= 4:
        return None
    hours = (p3_times[good] - p3_times[-1]) / 3600
    if hours[-1] - hours[0] <= 2:
        return None
    log_vals = np.log10(p3_vals[good])
    return np.polyfit(hours, log_vals, 1)[0]
def main(args_sys=None):
    """
    Generate the Replan Central timeline plot.

    Parameters
    ----------
    args_sys : list of str, optional
        Command-line arguments (default: sys.argv via argparse).

    Outputs ``timeline.png``, ``timeline_states.js`` and the available-comms
    HTML fragment into the configured data directory.
    """
    parser = get_parser()
    args = parser.parse_args(args_sys)
    # --test-get-web is a one-shot data-fetch mode: grab files and exit.
    if args.test_get_web:
        get_web_data(args.data_dir)
        sys.exit(0)
    # Basic setup. Set times and get input states, radzones and comms.
    now = CxoTime(args.date_now)
    now = CxoTime(now.date[:14] + ":00")  # truncate to 0 secs
    start = now - 1.0 * u.day
    stop = start + args.hours * u.hour
    # NOTE: for some reason django messes with the time zone, despite the environment
    # variable TZ hack in kadi/events/models.py. Once `get_radzones()` is run on HEAD
    # linux then the local timezone is set to Chicago, so we need to run
    # get_comms_avail_for_humans() before that. Do not reorder these calls.
    comms_avail = get_comms_avail(now, stop)
    comms_avail_humans = get_comms_avail_for_humans(comms_avail)
    states = kadi_states.get_states(start=start, stop=stop)
    radzones = get_radzones()
    comms = get_comms()
    # Get the ACIS ops fluence estimate and current 2hr avg flux
    fluence_date, fluence0 = get_fluence(
        acis_fluence_file(args.data_dir, test=args.test)
    )
    # Don't start the fluence prediction in the past.
    fluence_date = max(fluence_date, now)
    avg_flux = get_avg_flux(ace_rates_file(args.data_dir, test=args.test))
    # Get the realtime ACE P3 and HRC proxy values over the time range
    goes_x_times, goes_x_vals = get_goes_x(start, now, args.data_dir, args.test)
    p3_times, p3_vals = get_ace_p3(start, now, args.data_dir, args.test)
    hrc_times, hrc_vals = get_hrc(start, now, args.data_dir, args.test)
    # For testing: inject predefined values for different scenarios
    if args.test_scenario:
        p3_vals, avg_flux, fluence0 = get_p3_test_vals(
            args.test_scenario, p3_times, p3_vals, avg_flux, fluence0
        )
    # Compute the predicted fluence based on the current 2hr average flux.
    # Negative avg_flux (e.g. the P3_BAD sentinel) is clipped to zero rate.
    fluence_times = np.arange(fluence_date.secs, stop.secs, args.dt)
    rates = np.ones_like(fluence_times) * max(avg_flux, 0.0) * args.dt
    fluence = calc_fluence(fluence_times, fluence0, rates, states)
    zero_fluence_at_radzone(fluence_times, fluence, radzones)
    # Initialize the main plot figure
    fig = plt.figure(1, figsize=(9, 5))
    fig.patch.set_alpha(0.0)
    ax = fig.add_axes(AXES_LOC, facecolor="w")
    ax.yaxis.tick_right()
    ax.yaxis.set_label_position("right")
    ax.yaxis.set_offset_position("right")
    ax.patch.set_alpha(1.0)
    # Draw the plot elements (each helper adds one layer to ax).
    draw_ace_yellow_red_limits(fluence_times, ax)
    draw_dummy_lines_letg_hetg_legend(fluence_times, fig, ax)
    draw_fluence_and_grating_state_line(states, fluence_times, fluence, ax)
    draw_fluence_percentiles(
        args, states, radzones, fluence0, avg_flux, p3_times, p3_vals, fluence_times, ax
    )
    x0, x1, y0, y1 = set_plot_x_y_axis_limits(start, stop, ax)
    id_xs, id_labels, next_comm = draw_communication_passes(
        now, comms, ax, x0, x1, y0, y1
    )
    draw_radiation_zones(start, stop, radzones, ax, y0, y1)
    draw_now_line(now, y0, y1, id_xs, id_labels, ax)
    add_labels_for_obsids(start, states, id_xs, id_labels)
    ax.grid()
    ax.set_ylabel("Attenuated fluence / 1e9")
    ax.legend(loc="upper center", labelspacing=0.15, fontsize=10)
    # NOTE: this function call must be BEFORE any functions that add ax.text() labels to
    # the plot. For some reason if there are ax.text() labels you get an exception:
    #   box.xyann = (wlp[i], box.xyann[1])
    #   ^^^^^^^^^
    #   AttributeError: 'Text' object has no attribute 'xyann'
    lineid_plot.plot_line_ids(
        cxc2pd([start.secs, stop.secs]),
        [y1, y1],
        id_xs,
        id_labels,
        ax=ax,
        box_axes_space=0.14,
        label1_size=10,
    )
    draw_comms_avail(comms_avail, ax, x0, x1)
    draw_goes_x_data(goes_x_times, goes_x_vals, ax)
    draw_ace_p3_and_limits(now, start, p3_times, p3_vals, ax)
    draw_hrc_proxy(hrc_times, hrc_vals, ax)
    draw_hrc_acis_states(start, stop, states, ax, x0, x1)
    # Draw log scale y-axis on left
    draw_log_scale_axes(fig, y0, y1)
    # Write outputs: plot image, JS states table, available-comms HTML.
    fig.savefig(os.path.join(args.data_dir, "timeline.png"))
    write_states_json(
        os.path.join(args.data_dir, "timeline_states.js"),
        fig,
        ax,
        states,
        start,
        stop,
        now,
        next_comm,
        fluence,
        fluence_times,
        p3_vals,
        p3_times,
        avg_flux,
        hrc_vals,
        hrc_times,
    )
    write_comms_avail(
        comms_avail_humans, comms_avail_file(args.data_dir, test=args.test)
    )
def draw_log_scale_axes(fig, y0, y1):
    """Add a frameless log-scale overlay axes (left y-axis) for ACE/HRC/GOES.

    The overlay shares AXES_LOC with the main axes; its y-limits are mapped
    from the main-axes limits via 10**(y*2 + 1).
    """
    overlay = fig.add_axes(AXES_LOC, facecolor="w", frameon=False)
    overlay.set_autoscale_on(False)
    overlay.xaxis.set_visible(False)
    overlay.set_xlim(0, 1)
    overlay.set_yscale("log")
    overlay.set_ylim(np.power(10.0, np.array([y0, y1]) * 2 + 1))
    overlay.set_ylabel("ACE flux / HRC proxy / GOES X-ray")
    # GOES X-ray class markers on the left of the axis.
    overlay.text(-0.015, 2.5e3, "M", ha="right", color="m", weight="demibold")
    overlay.text(-0.015, 2.5e4, "X", ha="right", color="m", weight="semibold")
    # Draw dummy lines off the plot purely to populate the legend.
    dummy_x = [0, 1]
    dummy_y = [1, 1]
    overlay.plot(dummy_x, dummy_y, "-k", lw=3, label="ACE")
    overlay.plot(dummy_x, dummy_y, "-c", lw=3, label="HRC")
    overlay.plot(dummy_x, dummy_y, "-m", lw=3, label="GOES-X")
    overlay.legend(loc="upper left", labelspacing=0.15, fontsize=10)
def draw_hrc_acis_states(start, stop, states, ax, x0, x1):
    """Draw a cyan/red strip showing which instrument (ACIS/HRC) is in place."""
    sample_times = np.arange(start.secs, stop.secs, 300)
    state_vals = kadi_states.interpolate_states(states, sample_times)
    y_si = -0.23
    xs = cxc2pd(sample_times)
    ys = np.zeros_like(sample_times) + y_si
    # z encodes the instrument: 0 => ACIS (cyan), 1 => HRC (red, simpos < 0).
    zs = np.zeros_like(sample_times, dtype=float)
    zs[state_vals["simpos"] < 0] = 1.0
    plot_multi_line(xs, ys, zs, [0, 1], ["c", "r"], ax)
    margin = (x1 - x0) * 0.01
    ax.text(x1 + margin, y_si, "HRC/ACIS", ha="left", va="center", size="small")
def draw_comms_avail(comms_avail: Table | None, ax, x0, x1):
    """Draw available comms as a gray strip below the ACE/HRC multi-line plot."""
    if comms_avail is None:
        # Could not read URL, just make an empty list and carry on.
        comms_avail = []
    y_comm0 = -0.38
    dy_comm = 0.05
    margin = (x1 - x0) * 0.01
    # Draw one translucent black rectangle per available comm pass.
    for comm in comms_avail:
        pd0, pd1 = cxc2pd([comm["avail_bot"], comm["avail_eot"]])
        rect = matplotlib.patches.Rectangle(
            (pd0, y_comm0),
            pd1 - pd0,
            dy_comm,
            alpha=0.5,
            facecolor="k",
            edgecolor="none",
        )
        ax.add_patch(rect)
    ax.text(x1 + margin, y_comm0, "Avail comms", ha="left", va="center", size="small")
def draw_hrc_proxy(hrc_times, hrc_vals, ax):
    """Plot the HRC shield proxy (cyan): translucent line plus small markers."""
    xs = cxc2pd(hrc_times)
    ys = log_scale(hrc_vals)
    ax.plot(xs, ys, "-c", alpha=0.3, lw=3)
    ax.plot(xs, ys, ".c", mec="c", ms=3)
def draw_ace_p3_and_limits(now, start, p3_times, p3_vals, ax):
    """Plot ACE P3 rates (black) with dashed limit lines over the history span."""
    p3_x = cxc2pd(p3_times)
    p3_y = log_scale(p3_vals)
    limits_x = cxc2pd([start.secs, now.secs])
    # Lower limit at 12000 (blue dashed) and upper limit at 55000 (red dashed).
    y_lo = log_scale(12000.0)
    ax.plot(limits_x, [y_lo, y_lo], "--b", lw=2)
    y_hi = log_scale(55000.0)
    ax.plot(limits_x, [y_hi, y_hi], "--r", lw=2)
    ax.plot(p3_x, p3_y, "-k", alpha=0.3, lw=3)
    ax.plot(p3_x, p3_y, ".k", mec="k", ms=3)
def draw_goes_x_data(goes_x_times, goes_x_vals, ax):
    """Plot GOES X-ray flux (magenta), scaled by 1e8 before log-scaling."""
    xs = cxc2pd(goes_x_times)
    ys = log_scale(goes_x_vals * 1e8)
    ax.plot(xs, ys, "-m", alpha=0.3, lw=1.5)
    ax.plot(xs, ys, ".m", mec="m", ms=3)
def add_labels_for_obsids(start, states, id_xs, id_labels):
    """Append label positions and obsid text for each obsid change.

    Extends ``id_xs``/``id_labels`` in place with the obsid at ``start`` and
    at each transition between consecutive states.
    """
    id_xs.extend(cxc2pd([start.secs]))
    id_labels.append(str(states[0]["obsid"]))
    for prev_state, cur_state in zip(states[:-1], states[1:], strict=False):
        if prev_state["obsid"] == cur_state["obsid"]:
            continue
        id_xs.append(cxc2pd([cur_state["tstart"]])[0])
        id_labels.append(str(cur_state["obsid"]))
def draw_now_line(now, y0, y1, id_xs, id_labels, ax):
    """Draw a vertical green line at the current time and register a NOW label."""
    now_pd = now.plot_date
    ax.plot([now_pd, now_pd], [y0, y1], "-g", lw=4)
    id_xs.extend(cxc2pd([now.secs]))
    id_labels.append("NOW")
def draw_radiation_zones(start, stop, radzones, ax, y0, y1):
    """Shade radiation zone passages (translucent blue) clipped to [start, stop]."""
    for zone_start, zone_stop in radzones:
        t0 = CxoTime(zone_start)
        t1 = CxoTime(zone_stop)
        if t0 >= stop or t1 <= start:
            continue
        # Clip the zone to the plotted time range.
        t0 = max(t0, start)
        t1 = min(t1, stop)
        pd0, pd1 = t0.plot_date, t1.plot_date
        band = matplotlib.patches.Rectangle(
            (pd0, y0),
            pd1 - pd0,
            y1 - y0,
            alpha=0.2,
            facecolor="b",
            edgecolor="none",
        )
        ax.add_patch(band)
def draw_communication_passes(now, comms, ax, x0, x1, y0, y1):
    """Draw scheduled DSN comm passes as translucent red bands.

    Returns
    -------
    (id_xs, id_labels, next_comm)
        Label x-positions and texts for the comm passes in the plot range,
        plus the first comm whose BOT is after ``now`` (or None).
    """
    id_xs = []
    id_labels = []
    next_comm = None
    for comm in comms:
        bot = comm["bot_date"]["value"]
        eot = comm["eot_date"]["value"]
        pd0, pd1 = cxc2pd([bot, eot])
        if pd1 >= x0 and pd0 <= x1:
            band = matplotlib.patches.Rectangle(
                (pd0, y0),
                pd1 - pd0,
                y1 - y0,
                alpha=0.2,
                facecolor="r",
                edgecolor="none",
            )
            ax.add_patch(band)
            id_xs.append((pd0 + pd1) / 2)
            # Label like "24:0945-1045" from station number and local track time.
            id_labels.append(
                "{}:{}".format(comm["station"]["value"][4:6], comm["track_local"]["value"][:9])
            )
        if next_comm is None and CxoTime(bot) > now:
            next_comm = comm
    return id_xs, id_labels, next_comm
def draw_fluence_percentiles(
    args, states, radzones, fluence0, avg_flux, p3_times, p3_vals, fluence_times, ax
):
    """Plot 10, 50, 90 percentiles of fluence

    Percentile curves (green/blue/red) are computed from the ACE hourly-average
    history via calc_fluence_dist, then attenuated with calc_fluence and zeroed
    at radzone entries like the main fluence curve. Best-effort: any failure
    just prints a warning and skips the percentile curves.
    """
    try:
        if len(p3_times) < 4:
            raise ValueError("not enough P3 values")
        p3_slope = get_p3_slope(p3_times, p3_vals)
        # Only draw percentiles when a slope is available and the 2hr average
        # flux is valid (avg_flux may be the negative P3_BAD sentinel).
        if p3_slope is not None and avg_flux > 0:
            p3_fits, p3_samps, fluences = cfd.get_fluences(
                ace_hourly_avg_file(args.data_dir, test=args.test),
            )
            hrs, fl10, fl50, fl90 = cfd.get_fluence_percentiles(
                avg_flux,
                p3_slope,
                p3_fits,
                p3_samps,
                fluences,
                args.min_flux_samples,
                args.max_slope_samples,
            )
            fluence_hours = (fluence_times - fluence_times[0]) / 3600.0
            for fl_y, linecolor in zip(
                (fl10, fl50, fl90), ("-g", "-b", "-r"), strict=False
            ):
                # Resample the percentile curve onto the fluence time grid.
                fl_y = ska_numpy.interpolate(fl_y, hrs, fluence_hours)  # noqa: PLW2901
                rates = np.diff(fl_y)
                fl_y_atten = calc_fluence(fluence_times[:-1], fluence0, rates, states)
                zero_fluence_at_radzone(fluence_times[:-1], fl_y_atten, radzones)
                ax.plot(
                    cxc2pd(fluence_times[0]) + fluence_hours[:-1] / 24.0,
                    fl_y_atten,
                    linecolor,
                )
    except Exception as e:
        # Deliberately broad: the percentile overlay is optional and must not
        # prevent the rest of the timeline plot from being produced.
        print(("WARNING: p3 fluence not plotted, error : {}".format(e)))
def set_plot_x_y_axis_limits(start, stop, ax):
    """Set the main axes limits and return (x0, x1, y0, y1).

    X limits are plot dates for [start, stop]; Y limits are fixed at
    (-0.45, 2.55) in fluence/1e9 units.
    """
    x0 = start.plot_date
    x1 = stop.plot_date
    ax.set_xlim(x0, x1)
    y0, y1 = -0.45, 2.55
    ax.set_ylim(y0, y1)
    return x0, x1, y0, y1
def draw_fluence_and_grating_state_line(states, fluence_times, fluence, ax):
    """Draw the fluence curve colored by grating state.

    z value encodes the state: 0 = no grating (black), 1 = HETG inserted (red),
    2 = LETG inserted (cyan). HETG takes precedence when both report INSR.
    """
    xs = cxc2pd(fluence_times)
    grating_code = np.zeros(len(fluence_times), dtype=int)
    for state in states:
        in_state = (fluence_times > state["tstart"]) & (fluence_times <= state["tstop"])
        if state["hetg"] == "INSR":
            grating_code[in_state] = 1
        elif state["letg"] == "INSR":
            grating_code[in_state] = 2
    plot_multi_line(xs, fluence, grating_code, [0, 1, 2], ["k", "r", "c"], ax)
def draw_dummy_lines_letg_hetg_legend(fluence_times, fig, ax):
    """Plot off-scale dummy lines purely to populate the grating-state legend."""
    legend_x = [fluence_times[0], fluence_times[-1]]
    legend_y = [-1, -1]  # below the y-axis range, so never visible
    plot_cxctime(legend_x, legend_y, "-k", lw=3, label="None", fig=fig, ax=ax)
    plot_cxctime(legend_x, legend_y, "-r", lw=3, label="HETG", fig=fig, ax=ax)
    plot_cxctime(legend_x, legend_y, "-c", lw=3, label="LETG", fig=fig, ax=ax)
def draw_ace_yellow_red_limits(fluence_times, ax):
    """Draw fluence limit lines: 1.0 (yellow limit, blue dashed) and 2.0 (red
    limit, red dashed) in 1e9 units, plus the zero line in black."""
    x0, x1 = cxc2pd([fluence_times[0], fluence_times[-1]])
    ax.plot([x0, x1], [0.0, 0.0], "-k")  # ?? I don't see this line
    ax.plot([x0, x1], [1.0, 1.0], "--b", lw=2.0)
    ax.plot([x0, x1], [2.0, 2.0], "--r", lw=2.0)
def get_si(simpos):
    """
    Get SI corresponding to the given SIM position.

    Returns a fixed-width name (HRC names carry a leading space so all
    results align when displayed).
    """
    # (low, high, name) inclusive SIM position ranges.
    si_ranges = (
        (82109, 104839, "ACIS-I"),
        (70736, 82108, "ACIS-S"),
        (-86147, -20000, " HRC-I"),
        (-104362, -86148, " HRC-S"),
    )
    for lo, hi, name in si_ranges:
        if lo <= simpos <= hi:
            return name
    return " NONE"
def date_to_zulu(date):
    """
    Convert a date to an HHMM Zulu string, e.g. "2024:317:12:11:00" -> "1211".
    """
    hhmm = CxoTime(date).date[9:14]
    return hhmm.replace(":", "")
def get_comms_avail_for_humans(comms_avail: Table | None) -> Table | None:
"""Make table of available communication passes with human-readable fields.
This converts ``comms_avail`` from get_comms_avail() to the nicer format.
Support (GMT) BOT EOT Station Site Track time (local)
--------------- ---- ---- --------- --------- -------------------------
318/1345-1600 1445 1545 DSS-24 GOLDSTONE 0945-1045 EST, Wed 13 Nov
318/2115-0000 2215 2345 DSS-26 GOLDSTONE 1715-1845 EST, Wed 13 Nov
319/1100-1315 1200 1300 DSS-54 MADRID 0700-0800 EST, Thu 14 Nov
"""
# Could not read comms avail URL so just pass back None. This is handled later.
if comms_avail is None: