-
Notifications
You must be signed in to change notification settings - Fork 0
/
MCMU.py
1572 lines (1128 loc) · 54.6 KB
/
MCMU.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import re
from os import getcwd, chdir, makedirs, listdir, remove
from os.path import join, exists
from numpy import random as rd, empty, format_float_scientific as f_f, zeros, nonzero, divide, sqrt, seterr
from math import log, ceil
from tempfile import NamedTemporaryFile as NTF
from subprocess import Popen, DEVNULL
from matplotlib import pyplot as plt
from shutil import rmtree
import time
import logging
from random import randint, shuffle
from scipy import stats
import glob
from concurrent import futures
from multiprocessing import cpu_count
from sys import argv
from progress.bar import Bar
__author__ = "Rhys Doyle"
__copyright__ = "Copyright 2018, LTP Uncertainty Calculator"
__credits__ = ["Prof. Miles Turner", "Dublin City University"]
__maintainer__ = "Rhys Doyle"
__email__ = "[email protected]"
__status__ = "Testing"
def run_program():
    """Entry point: parse the configuration file named on the command line
    and dispatch to the Monte-Carlo or Morris analysis.

    The input file layout is positional: each setting is read from a fixed
    line number and the value is always the third whitespace-separated
    token on its line.
    """
    with open(str(argv[1]), 'r') as f:
        lines = f.readlines()
    # Program selector: '1' = Monte-Carlo, '2' = Morris screening.
    program = lines[9].split()[2]
    subject = lines[10].split()[2]             # working sub-directory name
    data = lines[11].split()[2]                # cross-section data file
    uncert_set = lines[12].split()[2::]        # one uncertainty per cross-section block
    runs = int(lines[13].split()[2])           # number of simulation runs
    species = lines[14].split()[2]             # species name passed to BOLSIG-
    num_EN_values = int(lines[15].split()[2])  # number of E/N grid points
    bolsig = lines[16].split()[2]              # path to the BOLSIG- executable
    try:
        num_cpus = int(lines[17].split()[2])
    except ValueError:
        # Fall back to all available cores when the field is not an integer.
        num_cpus = cpu_count()
    p_values = int(lines[20].split()[2])       # Morris: number of levels p
    if program == '1':
        Monte_carlo(subject, data, uncert_set, runs, species, num_EN_values, bolsig, num_cpus)
    elif program == '2':
        Morris(subject, data, uncert_set, runs, species, num_EN_values, bolsig, num_cpus, p_values)
    else:
        print("A valid program code was not written in your input file. Please check and try again.")
    return
"""
Monte-Carlo Simulation Code:
*** This code was created to determine the possible uncertainties associated with plasma transport coefficients
using the experimental uncertainties associated with the cross-sections used ***
"""
# Run function for Monte-Carlo simulations
def Monte_carlo(subject, data, uncert_set, runs, species, num_EN_values, bolsig, num_cpus):
    """Run the full Monte-Carlo uncertainty pipeline.

    Perturbs the cross-section file with lognormal random factors, runs
    BOLSIG- on every perturbed copy, then computes and writes statistics
    of the resulting transport/rate coefficients.

    subject : sub-directory holding the data file
    data : cross-section data file name
    uncert_set : one fractional uncertainty (str) per cross-section block
    runs : number of Monte-Carlo samples
    species : species name handed to BOLSIG-
    num_EN_values : number of E/N grid points
    bolsig : path to the BOLSIG- executable
    num_cpus : worker threads used to run BOLSIG-
    """
    start = time.time()
    og_cwd = getcwd()
    cwd = changedir(og_cwd, subject)
    cwd2 = join(cwd, 'Monte-Carlo')
    # Checking if directory exists, if yes remove and replace with clean directory
    if exists(cwd2):
        rmtree(cwd2)
        # Making and navigating to a new directory to store the new files
        makedirs(cwd2)
    elif not exists(cwd2):
        makedirs(cwd2)
    logging.basicConfig(filename=join(cwd2, "program.log"), filemode='w', level=logging.DEBUG,
                        format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
    # First pass normalises the data file so every block has a COMMENT: line,
    # then the file is re-read so all line indices are up to date.
    linelist = data_set(data)
    startsec, dashsec = search_set(linelist)
    com_format(startsec, dashsec, linelist, data)
    linelist = data_set(data)
    startsec, dashsec = search_set(linelist)
    commsec = comment_sec(startsec, dashsec, linelist)
    # Draw the lognormal perturbation factors (one array per cross-section).
    lognorm_set = lognorm(uncert_set, runs)
    cs_data = find_num(startsec, dashsec, linelist)
    inputdir = data_edit(cs_data, lognorm_set, data, cwd, dashsec, runs, commsec, cwd2)
    logging.info('Data files edited')
    outputdir = bolsig_file(inputdir, cwd2, og_cwd, species, num_EN_values)
    logging.info('BOLSIG run files created')
    logging.info('BOLSIG- running')
    bolsig_minus(cwd2, bolsig, num_cpus)
    logging.info('BOLSIG- completed')
    red_e_field, gen_mat, e_loss_dict, rate_dict, name_rate, name_energy, titlelist, num_e = output_file_read(outputdir, startsec, runs, num_EN_values)
    # NOTE(review): local 'stats' shadows the scipy.stats module imported at
    # file level - harmless here, but worth renaming.
    stats = gen_stats(gen_mat, runs)
    rate_stats = rate_cof(rate_dict, name_rate, runs, num_e)
    e_loss_stats = energy_loss(e_loss_dict, name_energy, runs, num_e)
    logging.info('Stats Generated')
    stats_file(red_e_field, stats, rate_stats, e_loss_stats, name_rate, name_energy, titlelist)
    logging.info('Stats file created')
    # graphing_data(red_e_field, stats, rate_stats, e_loss_stats, name_rate, name_energy)
    end = time.time()
    print(end - start)
# Enter the working sub-directory for the current data set.
def changedir(path, subject):
    """Change into the sub-directory *subject* of *path* and return the
    resulting current working directory."""
    target = join(path, subject)
    chdir(target)
    return getcwd()
# Importing data file to program
def data_set(data):
    """Read the cross-section data file and return its content as a list
    of lines (trailing newlines stripped).

    data : path of the UTF-8 encoded data file.
    """
    # Fix: the original opened with "r+" (read/write), which demands write
    # permission on the file although it is only ever read here.
    with open(data, "r", encoding='utf8') as datafile:
        # split the file content into lines and return the list
        return datafile.read().splitlines()
# Searching Data set for locations of COMMENT lines of different Cross Sections
def search_set(linelist):
    """Locate the start line of each cross-section block and every dashed
    separator line.

    linelist : data file content as a list of lines.
    Returns (startsec, dashsec): startsec holds the indices of lines that
    contain a collision-type keyword; dashsec holds the indices of the
    dashed table-delimiter lines (two per block).
    """
    # Keywords that mark the beginning of a cross-section block.
    items = ('EXCITATION', 'ELASTIC', 'IONIZATION', 'EFFECTIVE', 'ROTATIONAL', 'ATTACHMENT')
    startsec = []
    dashsec = []
    # Fix/idiom: the original manual while-loop ran one index past the end
    # (harmless only because it sliced) and substring-searched the str() of
    # a one-element slice; a plain enumerate over the lines is equivalent.
    for idx, line in enumerate(linelist):
        if any(word in line for word in items):
            startsec.append(idx)
        elif '---------------' in line:
            dashsec.append(idx)
    return startsec, dashsec
# Function to search for COMMENT sections and add them to blocks that lack one
def com_format(startsec, dashsec, linelist, data):
    """Ensure every cross-section block contains a COMMENT: line.

    Blocks without one get an empty 'COMMENT:' line inserted directly after
    their 'PARAM.:' line, and the (possibly modified) content is written
    back to the file *data*.

    startsec : indices of block header lines
    dashsec : indices of dashed separator lines (two per block)
    linelist : file content as a list of lines
    data : path of the file to rewrite
    """
    paramline = []  # PARAM.: line index of every block that lacks a COMMENT:
    for i in range(len(startsec)):
        x = startsec[i]
        y = dashsec[i * 2]          # first dashed separator of block i
        p_location = 0              # reset per block (was shared across blocks)
        found_comment = False
        # Scan the block header region for COMMENT: / PARAM.: lines.
        while x <= y:
            line = str(linelist[x:x + 1])
            if line.find('COMMENT:') != -1:
                found_comment = True
                break
            elif line.find('PARAM.:') != -1:
                p_location = x
            x += 1
        # Bug fix: the original tested 'x == y' to decide whether no COMMENT:
        # was found, which mis-fired when the COMMENT: line sat directly
        # before the dashed separator (break left x equal to y, wrongly
        # inserting a duplicate).  An explicit flag is unambiguous.
        if not found_comment:
            paramline.append(p_location)
    # Write everything back, appending an empty COMMENT: line after the
    # recorded PARAM.: lines.
    out_file = []
    for i in range(len(linelist)):
        if i in paramline:
            out_file.append(linelist[i] + '\nCOMMENT:\n')
        else:
            out_file.append(linelist[i] + '\n')
    with open(data, 'w', encoding='utf-8') as f:
        f.writelines(out_file)
    return
# Collect the COMMENT: line location of every cross-section block.
def comment_sec(startsec, dashsec, linelist):
    """Return the line index of the first COMMENT: line found in the header
    region of each block (between its start line and its first dashed
    separator, inclusive)."""
    commsec = []
    for block, start in enumerate(startsec):
        end = dashsec[block * 2]
        # Walk the header region; stop at the first COMMENT: hit.
        for pos in range(start, end + 1):
            if 'COMMENT:' in str(linelist[pos:pos + 1]):
                commsec.append(pos)
                break
    return commsec
# Draw lognormally-distributed perturbation factors for every uncertainty.
def lognorm(uncert_set, num_values):
    """For each fractional uncertainty in *uncert_set*, draw *num_values*
    lognormal random factors with mean 1 and variance equal to the squared
    uncertainty, and return them as a list of arrays."""
    mean = 1
    samples = []
    for u in uncert_set:
        var = float(u) ** 2
        # Parameters of the underlying normal giving the requested
        # mean/variance of the lognormal distribution.
        sigma = sqrt(log(1 + var / (mean ** 2)))
        mu = log(mean ** 2 / sqrt(var + (mean ** 2)))
        samples.append(rd.lognormal(mu, sigma, num_values))
    return samples
# Extract each cross-section's numeric table from the file content.
def find_num(startsec, dashsec, linelist):
    """Build a dict mapping '<name line>, No.<i>' to the list of data lines
    of block i (the lines between its two dashed separators)."""
    cs_data = {}
    bar = Bar('Extracting Data ', max=len(startsec), suffix='%(percent)d%%')
    for idx, start in enumerate(startsec):
        # The line directly after the block keyword carries the name.
        name = linelist[start + 1]
        first = dashsec[idx * 2] + 1
        last = dashsec[(idx * 2) + 1]
        cs_data[(str(name) + ", No." + str(idx))] = linelist[first:last]
        bar.next()
    bar.finish()
    return cs_data
# Using the generated random numbers to create an equal amount of perturbed data files
def data_edit(cs_data, lognorm_set, oldfile, cwd, dashsec, num_uncert, comsec, cwd2):
    """Write *num_uncert* perturbed copies of the data file.

    For run j, every cross-section value of block i is multiplied by
    lognorm_set[i][j] and the block's COMMENT: line records the factor used.
    Files are written with random names into '<cwd2>/Input_Files', whose
    path is returned.

    cs_data : dict of block-name -> list of 'E<TAB>cs' data lines
    lognorm_set : one array of num_uncert factors per block
    oldfile : name of the original data file (located in *cwd*)
    dashsec : dashed-separator indices (two per block) in the original file
    comsec : COMMENT: line index of each block
    """
    # Importing the list of cross-section data from the dictionary created earlier
    cross_values = list(cs_data.values())
    # Creating a second variable of the same list to reset the first when altered
    reset = cross_values.copy()
    chdir(cwd2)
    cwd3 = join(cwd2, 'Input_Files')
    makedirs(cwd3)
    progressbar = Bar('Generating Datasets ', max=num_uncert, suffix='%(percent)d%%')
    # For loop for each random number
    for j in range(num_uncert):
        # Reseting the variables
        cross_values = reset.copy()
        # Re-read the pristine file each run so edits never accumulate.
        with open(join(cwd, oldfile), 'r', encoding='utf8') as old_data:
            oldlinelist = old_data.read().splitlines()
        # Iterating over each set of Cross section data
        for i in range(len(cross_values)):
            # Selecting 1 set of data at a time
            dataset = cross_values[i].copy()
            # Iterating over each cross section individually
            for k in range(len(dataset)):
                # Splitting the Energy and Cross-Section numbers
                E, cs = dataset[k].split('\t')
                # Perturbing the Cross section number by the jth generated number
                cs = (float(cs) * lognorm_set[i][j])
                # Creating a new data line with the new cross section number
                newdata = (E + '\t' + str(cs))
                # Updating the list
                dataset[k] = newdata
            # Updating the entire data set
            cross_values[i] = dataset.copy()
            # Splice the perturbed lines back over the block's table region.
            for x, y in zip(range((dashsec[i * 2] + 1), (dashsec[(i * 2) + 1])), range(len(dataset))):
                oldlinelist[x] = dataset[y]
            # Record the factor applied to this block in its COMMENT: line.
            oldlinelist[comsec[i]] = ('COMMENT: ' + str(lognorm_set[i][j]))
        # Creating a new file for each uncertainty perturbation.
        # NamedTemporaryFile with delete=False yields a unique random name
        # per run; the files persist after closing.
        new_file = NTF(mode='w', dir=cwd3, suffix='.txt', delete=False, encoding="utf8")
        # and write everything back into their own new file
        for item in oldlinelist:
            new_file.write("%s\n" % item)
        new_file.close()
        progressbar.next()
    progressbar.finish()
    return cwd3
# Creating the input file for Bolsig minus
def bolsig_file(inputdir, cwd, og_cwd, species_name, num_EN_values):
    """Generate one BOLSIG- run script per perturbed input file.

    Copies the template 'bolsig_run_script.dat' (in *og_cwd*), substituting
    the input file name, output file name, species and number of E/N points
    on the template lines tagged '/ File', '/ Output File', '/ Species' and
    '/ Number'.  Returns the output directory BOLSIG- will write into.
    """
    # Making a directory for the run files for Bolsig
    makedirs('BOLSIG_Run_Files')
    # Fix: the template was opened with plain open() and never closed
    # (resource leak), in read/write mode although it is only read.
    with open(join(og_cwd, 'bolsig_run_script.dat'), "r", encoding='utf8') as template:
        linelist = template.read().splitlines()
    # Template line indices for each substitution point.
    inputlocation = 0
    outputlocation = 0
    species = 0
    num_EN_loc = 0
    outputdir = str(cwd) + "/Output_Files"
    makedirs(outputdir)
    # Loop to find where in the template the tagged lines are.
    for i in range(len(linelist)):
        if linelist[i].find('/ File') != -1:
            inputlocation = i
        elif linelist[i].find('/ Species') != -1:
            species = i
        elif linelist[i].find('/ Output File') != -1:
            outputlocation = i
        elif linelist[i].find('/ Number') != -1:
            num_EN_loc = i
    dir_list = listdir(inputdir)
    progressbar = Bar('Creating Run Files ', max=len(dir_list), suffix='%(percent)d%%')
    # Creating a run file for each data file
    for inputfile in dir_list:
        runfilename = ('run_' + inputfile)
        runfile = ('BOLSIG_Run_Files/' + runfilename)
        # Opening the runfile in a writable capacity
        with open(runfile, 'w', encoding='utf8') as f:
            # Write the template through, substituting the tagged lines.
            for j in range(len(linelist)):
                if j == inputlocation:
                    f.write(('\"' + inputdir + "/" + inputfile + '\"' + ' / File\n'))
                elif j == outputlocation:
                    f.write(('\"' + outputdir + '/output_' + inputfile + '\"' + ' / Output File\n'))
                elif j == species:
                    f.write((species_name + ' / Species\n'))
                elif j == num_EN_loc:
                    f.write((str(num_EN_values) + ' / Number\n'))
                else:
                    f.write(linelist[j] + '\n')
        progressbar.next()
    progressbar.finish()
    return outputdir
# Function to Run BOLSIG-
def bolsig_minus(cwd, bolsig, num_cpus):
    """Run the BOLSIG- executable on every generated run file, at most
    *num_cpus* processes at a time via a thread pool.

    cwd : directory containing 'BOLSIG_Run_Files'
    bolsig : path to the BOLSIG- executable
    num_cpus : maximum number of concurrent BOLSIG- processes
    """
    # Gets list of runfiles
    infile_list = glob.glob(str(cwd)+"/BOLSIG_Run_Files/run*")
    progressbar = Bar('Running BOLSIG- ', max=len(infile_list), suffix='%(percent)d%%')
    # Function that will execute BOLSIG-
    def solver(infiles):
        # stdout is discarded; each worker blocks until its run exits.
        proc = Popen([bolsig, infiles], stdout=DEVNULL)
        proc.wait()
        # NOTE(review): next() is called from worker threads; progress.bar
        # is not documented as thread-safe - confirm.
        progressbar.next()
    # The with-block waits for all submitted runs to finish.
    with futures.ThreadPoolExecutor(max_workers=num_cpus) as execute_solver:
        execute_solver.map(solver, infile_list)
    progressbar.finish()
    return
# Read all output files and extract data
def output_file_read(outputdir, startsec, num_values, num_e):
    """Parse every BOLSIG- output file and collect all coefficient tables.

    outputdir : directory holding the 'output*' files
    startsec : block start indices; len(startsec) = number of cross-sections
    num_values : number of runs (one output file per run)
    num_e : number of E/N grid points per table

    Returns (red_e_field, gen_mat, e_loss_dict, rate_dict, name_rate,
    name_energy, titlelist, num_e).  gen_mat is a list of (num_e, num_values)
    matrices, one per general coefficient, with column i filled from output
    file i; rate_dict / e_loss_dict map the file index to a
    (num_e, len(startsec)) matrix.
    """
    # List of data set title to be searched for
    titlelist = ['Electric field / N (Td)', 'Mobility *N (1/m/V/s)',
                 'Diffusion coefficient *N (1/m/s)',
                 'Energy mobility *N (1/m/V/s)', 'Energy diffusion coef. D*N (1/m/s)',
                 'Total collision freq. /N (m3/s)', 'Momentum frequency /N (m3/s)',
                 'Total ionization freq. /N (m3/s)', 'Townsend ioniz. coef. alpha/N (m2)',
                 'Inelastic energy loss coefficient (eV m3/s)',
                 'Rate coefficient (m3/s)', 'Energy loss coefficient (eV m3/s)']
    # Array to hold E/N values
    # NOTE(review): E/N values are appended again for every output file, so
    # this list ends up num_e entries long per file - confirm intended.
    red_e_field = []
    # Setting up holding matrices for each set of coefficients
    mobil = empty((num_e, num_values))
    dif_cof = empty((num_e, num_values))
    e_mob = empty((num_e, num_values))
    e_dif_cof = empty((num_e, num_values))
    tot_col_freq = empty((num_e, num_values))
    p_freq = empty((num_e, num_values))
    tot_ion_freq = empty((num_e, num_values))
    town_ion_cof = empty((num_e, num_values))
    inelastic_loss_cof = empty((num_e, num_values))
    rate_matrix = empty((num_e, len(startsec)))
    e_loss_matrix = empty((num_e, len(startsec)))
    name_rate = []
    name_energy = []
    rate_dict = {}
    e_loss_dict = {}
    outfile_list = glob.glob(outputdir + "/output*")
    # Loop to extract the outputted data from each file
    for outputfile, i in zip(outfile_list, range(len(outfile_list))):
        # r / e count how many rate / energy-loss tables have been seen in
        # this file; they index the columns of rate_matrix / e_loss_matrix.
        r = 0
        e = 0
        # Names are rebuilt per file (the last file's names are returned).
        name_rate.clear()
        name_energy.clear()
        # Opening each data file to be checked
        with open(outputfile, 'r', encoding='utf-8') as f:
            # extracting each line from the file and listing it to be searched
            linelist = f.read().splitlines()
        # Iterating over the list of lines; each title is followed by
        # num_e tab-separated 'E/N<TAB>value' rows.
        for j in range(len(linelist)):
            # Checking for the phrase specified above
            if linelist[j].find(titlelist[0]) != -1:
                # collecting all the subsequent data for that set
                for k in range(1, (num_e + 1)):
                    e_n, unused = linelist[(j + k)].split('\t')
                    # Saving E/N values
                    red_e_field.append(e_n)
            elif linelist[j].find(titlelist[1]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, mob = linelist[(j + k)].split('\t')
                    mobil[(k - 1), i] = mob
            # As above
            elif linelist[j].find(titlelist[2]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, dif = linelist[(j + k)].split('\t')
                    dif_cof[(k - 1), i] = dif
            # As above
            elif linelist[j].find(titlelist[3]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, e_mobility = linelist[(j + k)].split('\t')
                    e_mob[(k - 1), i] = e_mobility
            # As above
            elif linelist[j].find(titlelist[4]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, e_diffusion = linelist[(j + k)].split('\t')
                    e_dif_cof[(k - 1), i] = e_diffusion
            # As above
            elif linelist[j].find(titlelist[5]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, total_collision = linelist[(j + k)].split('\t')
                    tot_col_freq[(k - 1), i] = total_collision
            # As above
            elif linelist[j].find(titlelist[6]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, mom_freq = linelist[(j + k)].split('\t')
                    p_freq[(k - 1), i] = mom_freq
            # As above
            elif linelist[j].find(titlelist[7]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, total_ion = linelist[(j + k)].split('\t')
                    tot_ion_freq[(k - 1), i] = total_ion
            # As above
            elif linelist[j].find(titlelist[8]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, townsend_ion = linelist[(j + k)].split('\t')
                    town_ion_cof[(k - 1), i] = townsend_ion
            # As above
            elif linelist[j].find(titlelist[9]) != -1:
                for k in range(1, (num_e + 1)):
                    e_n, inelastic = linelist[(j + k)].split('\t')
                    inelastic_loss_cof[(k - 1), i] = inelastic
            # As above
            elif linelist[j].find(titlelist[10]) != -1:
                # Taking the name of the data set (line above the title)
                name_rate.append(linelist[j - 1].rstrip())
                for k in range(1, (num_e + 1)):
                    e_n, rate = linelist[(j + k)].split('\t')
                    # Saving the data to a collective matrix
                    rate_matrix[(k - 1), r] = rate
                r += 1
            # As above
            elif linelist[j].find(titlelist[11]) != -1:
                name_energy.append(linelist[j - 1].rstrip())
                for k in range(1, (num_e + 1)):
                    e_n, e_loss = linelist[(j + k)].split('\t')
                    e_loss_matrix[(k - 1), e] = e_loss
                e += 1
        # Saving each set of rate and energy loss coefficients to collective
        # dictionary; .copy() is essential because the matrices are reused
        # for the next file.
        rate_dict[i] = rate_matrix.copy()
        e_loss_dict[i] = e_loss_matrix.copy()
    gen_mat = [mobil, dif_cof, e_mob, e_dif_cof, tot_col_freq, p_freq, tot_ion_freq, town_ion_cof,
               inelastic_loss_cof]
    # Returning all relevant matrices and dictionaries
    return red_e_field, gen_mat, e_loss_dict, rate_dict, name_rate, name_energy, titlelist, num_e
# Running mean / standard deviation for every general coefficient matrix.
def gen_stats(gen_mat, num_values):
    """Welford-style single-pass statistics over the columns of each matrix
    in *gen_mat*.

    For every E/N row of every matrix, returns [mean, std of the mean,
    population std] computed over the num_values run columns.
    """
    result = []
    for matrix in gen_mat:
        per_matrix = []
        # One statistics row per E/N value.
        for row in range(len(matrix[:, 0])):
            mean = matrix[row, 0]
            acc = 0
            sem = 0
            std = 0
            # Welford update over the remaining run columns.  The sqrt's
            # stay inside the loop so a single-run input yields zeros
            # instead of dividing by zero.
            for col in range(1, num_values):
                value = matrix[row, col]
                updated = mean + (value - mean) / (col + 1)
                acc = acc + ((value - mean) * (value - updated))
                mean = updated
                sem = sqrt(acc / ((num_values ** 2) - num_values))
                std = sqrt(acc / (num_values - 1))
            per_matrix.append([mean, sem, std])
        result.append(per_matrix)
    return result
# Running mean / standard deviation for each named rate coefficient.
def rate_cof(rate, names, num_values, num_e):
    """Welford-style single-pass statistics of the rate coefficients.

    rate maps run index -> (num_e, len(names)) matrix.  For each name the
    result dict holds num_e rows of [mean, std of the mean, population std]
    computed across the num_values runs.
    """
    runs = list(rate.values())
    stats_by_name = {}
    for col, name in enumerate(names):
        rows = []
        for j in range(num_e):
            mean = runs[0][j, col]
            acc = 0
            sem = 0
            std = 0
            # Welford update across runs; sqrt's inside the loop so a
            # single-run input yields zeros instead of dividing by zero.
            for k in range(1, num_values):
                value = runs[k][j, col]
                updated = mean + (value - mean) / (k + 1)
                acc = acc + (value - mean) * (value - updated)
                mean = updated
                sem = sqrt(acc / ((num_values ** 2) - num_values))
                std = sqrt(acc / (num_values - 1))
            rows.append([mean, sem, std])
        stats_by_name[name] = rows
    return stats_by_name
# As rate_cof, but for the energy-loss coefficients.
def energy_loss(e_loss, names, num_values, num_e):
    """Welford-style single-pass statistics of the energy-loss coefficients.

    e_loss maps run index -> (num_e, len(names)) matrix.  For each name the
    result dict holds num_e rows of [mean, std of the mean, population std]
    computed across the num_values runs.
    """
    runs = list(e_loss.values())
    stats_by_name = {}
    for col, name in enumerate(names):
        rows = []
        for j in range(num_e):
            mean = runs[0][j, col]
            acc = 0
            sem = 0
            std = 0
            # Welford update across runs; sqrt's inside the loop so a
            # single-run input yields zeros instead of dividing by zero.
            for k in range(1, num_values):
                value = runs[k][j, col]
                updated = mean + (value - mean) / (k + 1)
                acc = acc + (value - mean) * (value - updated)
                mean = updated
                sem = sqrt(acc / ((num_values ** 2) - num_values))
                std = sqrt(acc / (num_values - 1))
            rows.append([mean, sem, std])
        stats_by_name[name] = rows
    return stats_by_name
# Saving all calculated statistics to a single text file
def stats_file(red_e_field, all_stats, rate_stats, e_loss_stats, names_rate, names_energy, titlelist):
    """Write every computed statistic to 'data_stats.txt' in the current
    directory.

    One section per general coefficient (labelled by titlelist[1:]), then
    one per rate coefficient and one per energy-loss coefficient.  Each row
    holds the E/N value, mean, std of the mean, population std and the
    percentage error (population std relative to the mean).  Statistic rows
    are [mean, std_of_mean, std_of_pop].
    """
    # Opening said text file
    with open('data_stats.txt', 'w') as f:
        f.write('Below is the mean and standard deviation of the mean \nfor a range of data types listed, '
                'such data was \nobtained by using BOLSIG- and inputed data file.\n')
        # General transport coefficients.
        for i in range(len(all_stats)):
            stats = all_stats[i]
            f.write('\n' + str(titlelist[i+1]) + '\n' + str(titlelist[0]) + ' Mean STD of the Mean '
                    ' STD of the Pop. Percentage Error\n')
            for j in range(len(stats[:])):
                # Guard against division by a zero mean.
                if stats[j][0] != 0:
                    perc_error = (float(stats[j][2]) / float(stats[j][0])) * 100
                else:
                    perc_error = 0
                f.write(' ' + str(red_e_field[j]) + ' ' +
                        f_f(float(stats[j][0]), precision=6, unique=False) + ' ' +
                        f_f(float(stats[j][1]), precision=6, unique=False) + ' ' +
                        f_f(float(stats[j][2]), precision=6, unique=False) + ' ' +
                        str("%.3f" % perc_error) + '%\n')
        # Rate coefficients, one block per named reaction.
        for i in range(len(names_rate)):
            f.write('\n' + names_rate[i] + ' - Rate Coefficient\n' + str(titlelist[0]) +
                    ' Mean STD of the Mean STD of the Pop. Percentage Error\n')
            rate_data = rate_stats[names_rate[i]]
            for j in range(len(rate_data[:])):
                if rate_data[j][0] != 0:
                    perc_error = (float(rate_data[j][2])/float(rate_data[j][0]))*100
                else:
                    perc_error = 0
                f.write(' ' + str(red_e_field[j]) + ' ' +
                        f_f(float(rate_data[j][0]), precision=6, unique=False) + ' ' +
                        f_f(float(rate_data[j][1]), precision=6, unique=False) + ' ' +
                        f_f(float(rate_data[j][2]), precision=6, unique=False) + ' ' +
                        str("%.3f" % perc_error) + '%\n')
        # Energy-loss coefficients, one block per named process.
        for i in range(len(names_energy)):
            f.write('\n' + names_energy[i] + ' - Energy Loss Coefficient\n' + str(titlelist[0]) +
                    ' Mean STD of the Mean STD of the Pop. Percentage Error\n')
            e_loss_data = e_loss_stats[names_energy[i]]
            for j in range(len(e_loss_data[:])):
                if e_loss_data[j][0] != 0:
                    perc_error = (float(e_loss_data[j][2]) / float(e_loss_data[j][0])) * 100
                else:
                    perc_error = 0
                f.write(' ' + str(red_e_field[j]) + ' ' +
                        f_f(float(e_loss_data[j][0]), precision=6, unique=False) + ' ' +
                        f_f(float(e_loss_data[j][1]), precision=6, unique=False) + ' ' +
                        f_f(float(e_loss_data[j][2]), precision=6, unique=False) + ' ' +
                        str("%.3f" % perc_error) + '%\n')
# Graph each set of statistics and save
def graphing_data(red_e_field, stats, rate_stats, e_loss_stats, names_rate, names_energy):
    """Produce log-log errorbar plots (PNG files in the current directory)
    for every statistic computed by the Monte-Carlo run.

    Currently unused - the call site in Monte_carlo() is commented out.
    """
    # Data type names
    image_name = ['Mobility', 'Diffusion coefficient',
                  'Energy mobility', 'Energy diffusion coef.', 'Total collision freq.', 'Momentum frequency',
                  'Total ionization freq.', 'Townsend ioniz. coef.', 'Inelastic energy loss coefficient']
    # Corresponding units
    units = [' *N (1/m/V/s)', ' *N (1/m/s)', ' *N (1/m/V/s)', ' D*N (1/m/s)', ' /N (m3/s)', ' /N (m3/s)',
             ' /N (m3/s)', ' alpha/N (m2)', ' (eV m3/s)']
    # X-axis values are constant for all data types
    # NOTE(review): the hard-coded 50 assumes 50 E/N points per run - confirm
    # this matches num_EN_values from the input file.
    x = [float(i) for i in red_e_field[0:50]]
    # Iterating through all data set types to produce a graph of each
    for i in range(len(stats)):
        # Taking the y-values and corresponding error for each x-value
        # (stat rows are [mean, std_of_mean, std_of_pop]).
        y = [item[0] for item in stats[i]]
        yerr = [item[2] for item in stats[i]]
        # Opening figure for the graph
        plt.figure()
        # X and Y axis labels
        plt.xlabel('Reduced Electric Field, E/N (Td)')
        plt.ylabel(image_name[i] + units[i])
        # Changing to log scales
        plt.yscale('log')
        plt.xscale('log')
        # Plotting errorbar graphs
        plt.errorbar(x, y, yerr=yerr, ecolor='k')
        plt.tight_layout()
        plt.savefig(image_name[i] + '.png')
        plt.close()
    # Name and units for Rate and Energy loss coefficients
    r_and_e = ['Rate coefficient', 'Energy loss coefficient']
    units_re = [' (m3/s)', ' (eV m3/s)']
    # Rate Coefficient: all named coefficients share one figure.
    plt.figure()
    plt.xlabel('Reduced Electric Field, E/N (Td)')
    plt.ylabel(r_and_e[0] + units_re[0])
    plt.yscale('log')
    plt.xscale('log')
    for j in range(len(names_rate)):
        y = [item[0] for item in rate_stats[names_rate[j]]]
        yerr = [item[2] for item in rate_stats[names_rate[j]]]
        plt.errorbar(x, y, yerr=yerr)
        plt.tight_layout()
    # plt.legend(names_rate)
    plt.savefig(r_and_e[0] + '.png')
    plt.close()
    # Energy loss Coefficient: all named coefficients share one figure.
    plt.figure()
    plt.xlabel('Reduced Electric Field, E/N (Td)')
    plt.ylabel(r_and_e[1] + units_re[1])
    plt.yscale('log')
    plt.xscale('log')
    for j in range(len(names_energy)):
        y = [item[0] for item in e_loss_stats[names_energy[j]]]
        yerr = [item[2] for item in e_loss_stats[names_energy[j]]]
        plt.errorbar(x, y, yerr=yerr)
        plt.tight_layout()
    # plt.legend(names_rate)
    plt.savefig(r_and_e[1] + '.png')
    plt.close()
    # for k in range(len(names_energy)):
    #     plt.figure()
    #     plt.title(names_energy[k])
    #     plt.xlabel('Reduced Electric Field, E/N (Td)')
    #     plt.ylabel(r_and_e[1] + units_re[1])
    #     plt.yscale('log')
    #     plt.xscale('log')
    #     y = [item[0] for item in e_loss_stats[names_energy[k]]]
    #     yerr = [item[2] for item in e_loss_stats[names_energy[k]]]
    #     plt.errorbar(x, y, yerr=yerr, ecolor='k')
    #     plt.savefig(r_and_e[1] + str(k) + '.png')
    #     plt.close()
    # Ratio of diffusion to mobility (characteristic energy) with the
    # uncertainty propagated in quadrature from both inputs.
    mob = [item[0] for item in stats[0]]
    dif = [item[0] for item in stats[1]]
    mob_err = [item[2] for item in stats[0]]
    dif_err = [item[2] for item in stats[1]]
    dif_mob = []
    dif_mob_err = []
    for i in range(len(stats[0])):
        dif_mob.append(dif[i]/mob[i])
        dif_mob_err.append(dif_mob[i]*sqrt((mob_err[i]/mob[i])**2 + (dif_err[i]/dif[i])**2))
    plt.figure()
    plt.xlabel('Reduced Electric Field, E/N (Td)')
    plt.ylabel(r'$D_T/\mu, (V)$')
    plt.yscale('log')
    plt.xscale('log')
    plt.errorbar(x, dif_mob, yerr=dif_mob_err, ecolor='k')
    plt.tight_layout()
    plt.savefig('Dif_mobility_ratio.png')
    plt.close()
"""
Morris Method Code:
*** This code was created to assess the sensitivity plasma transport coefficients have to variations in their
corresponding cross-sections ***
"""
def Morris(subject, data, uncert_set, runs, species, num_EN_values, bolsig, num_cpus, p_values):
    """Run the Morris screening pipeline.

    Builds Morris trajectories over the cross-section uncertainties, runs
    BOLSIG- on each sampled data set, then computes, prints and normalises
    the sensitivity statistics.

    Parameters mirror Monte_carlo(), plus p_values: the number of Morris
    levels p.
    """
    start = time.time()
    og_cwd = getcwd()
    cwd = changedir(og_cwd, subject)
    cwd2 = join(cwd, 'Morris')
    # Checking if directory exists, if yes remove and replace with clean directory
    if exists(cwd2):
        rmtree(cwd2)
        # Making and navigating to a new directory to store the new files
        makedirs(cwd2)
    elif not exists(cwd2):
        makedirs(cwd2)
    logging.basicConfig(filename=join(cwd2, "program.log"), filemode='w', level=logging.DEBUG,
                        format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
    # Normalise the data file (ensure COMMENT: lines) and re-read it so all
    # line indices are up to date.
    linelist = data_set(data)
    startsec, dashsec = search_set(linelist)
    com_format(startsec, dashsec, linelist, data)
    linelist = data_set(data)
    startsec, dashsec = search_set(linelist)
    commsec = comment_sec(startsec, dashsec, linelist)
    dataset_name = read_energy(startsec, linelist)
    # Build the Morris trajectories and map them through the lognormal CDF.
    sample_set, delta, num_params = run_morris(uncert_set, p_values, runs)
    sample_set = cdf(uncert_set, sample_set)
    logging.info('Morris trajectories created - working directories created')
    cs_data = find_num(startsec, dashsec, linelist)
    inputfiles = data_edit_morris(cs_data, sample_set, data, cwd, dashsec, commsec, cwd2)
    logging.info('Input files created')
    inputdir = bolsig_file_morris(inputfiles, cwd2, og_cwd, species, num_EN_values)
    logging.info('BOLSIG run files created')
    bolsig_minus_morris(inputdir, bolsig, num_cpus)
    logging.info('All BOLSIG simulations complete')
    mean, std, energy = morris_stats(inputfiles, inputdir, sample_set, delta, num_EN_values, num_params)
    logging.info('Statistics completed')
    print_stats(mean, std, energy, cwd2, dataset_name)
    logging.info('Stats file created')
    logging.info('Normalising Stats')
    normal_stats(mean, std, energy, dataset_name)
    logging.info('Stats Normalised')
    end = time.time()
    print(end-start)
# Collect the energy line of every collision block for labelling later on.
def read_energy(startsec, linelist):
    """Return the line two positions below each block start (the energy /
    parameter line of that collision)."""
    return [linelist[start + 2] for start in startsec]
# Compiling all matrices and calculation Morris trajectory matrix
def run_morris(uncert_set, num_levels, runs):