-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathgridex.py
1247 lines (1032 loc) · 68 KB
/
gridex.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Grid-ex
Grid Spectroscopy in an IPython environment
Class to read and analyze Nanonis data files (grid spectroscopy) in binary or ASCII file format.
Fitting KPFM (Δf vs. V) curves, IZ spectroscopy curves, multiple Gaussian peaks, linear polynomials and arbitrary functions is implemented.
2015-2017, Alex Riss, GPL
Populates the dictionaries:
self.headers (headers in the files, keys are lowercase, space replaced by '_')
and for each point in self.points:
data_header (headers for the particular point, such as x,y,z position)
data (np.array with the data)
Example Usage:
import gridex
import ipywidgets
import IPython
%matplotlib notebook
griddata = gridex.GridData()
griddata.load_spectroscopy('data/KPFM_dimer_constz_grid2_*.dat', long_output=False, first_file='3', last_file='67') # binary data can also be read
griddata.fit_KPFM(x_limit=[-0.8,0.2]) # we can set a x_limit for the fit
# return interactive plot
channels = ['v*_(v)','df*_(hz)','fit_r2','fit_sse','amplitude_mean_(m)','amplitude_stddev_(m)']
gridplot = gridex.PlotData(griddata)
fig = gridplot.plot_channels(channels, num_rows=3, cmap='Blues_r')
IPython.display.display(gridplot.plot_options())
Caveats:
- binary values are read in Big Endian format. I am not sure if that will change depending on the machine the Nanonis runs on
- for ascii data square grids are assumed.
Todo:
- check if the rotation calculation uses the right convention (clockwise vs anticlockwise) for ASCII data
- better documentation (i.e. examples with better comments in the ipython example notebook), describe which data is generated
- make smoothing and derivative options, such as in the nanonis viewers
- have an interface to find images (based on date (+-3 days within the grid), based on other parameters (e.g. const height image, >4 Hz range in frequency shift))
- plot STM/AFM images next to the grid
- provide easy way to plot sweep channel at specific point, e.g. dI/dV maps from point spectroscopy grids
- unit tests
Todo maybe:
- switch image interpolation between 'nearest', 'bilinear', 'bicubic'
- plot grid data as a function of the x,y values, not just the point number (sort of is done for binary files)
- more interactive experience, either with matplotlib widgets, or with mpld3, or with bokeh
"""
from __future__ import print_function
from __future__ import division
import sys
import glob
import struct
import copy
import numpy as np
import numpy.lib.recfunctions as recfunctions # seems we need to import this explicitely
import scipy.special
import scipy.optimize
import io
import matplotlib
import matplotlib.pyplot as plt
import ipywidgets
import seaborn as sns
# global seaborn/matplotlib styling for notebook plots
sns.set_style("dark")
sns.set_context("notebook", font_scale=1.0, rc={"lines.linewidth": 1.5})
sns.set(rc={'axes.facecolor':'white', 'figure.facecolor':'white'})

MASS_ELECTRON = 9.10938356e-31    # electron rest mass in kg
PLANCK_CONSTANT = 6.62607004e-34  # Planck constant in m2 kg / s
CONV_J_EV = 6.242e18              # conversion factor from Joule to eV
PYTHON_VERSION = sys.version_info.major  # used to choose the right open() signature for py2/py3
class GridData(object):
"""Class to read grid date from binary or ascii files. Also fits the KPFM parabola and the exponential IZ curves. """
def __init__(self):
self.filename = "" # filename
self.headers = {} # dictionary containing all the header data
self.data_points = [] # list holding the points, each point is a dictionary with the keys 'data_header' (containing header info from binary data) and 'data' (a structured numpy array containing the data from the sweeps)
self.num_data_points = 0 # number of data points
self.channels = [] # list holding the names of the swept channels
self.points = 0 # number of points per sweep
self.data = None # 3-dimensional data as numpy array (first index is the number of the point, second index represents the channel, third index the index of the sweep)
def load_spectroscopy(self,fname,long_output=False,first_file=None,last_file=None):
    """Load spectroscopy data, either the binary .3ds file or data from a number of .dat files.

    The optional parameters "first_file" and "last_file" can be used to further restrict the range for .dat files (substrings of the filename will work for those).

    Args:
        fname (string): File name to open (wildcards can be used as specified in the python function glob.glob, e.g. "spectroscopy_4_*.dat").
        long_output (boolean): Specifies whether to give detailed output.
        first_file (string): Used to further restrict the filelist. Specifies the first file within the list of files (when using wildcards for fname). A substring is sufficient.
        last_file (string): Used to further restrict the filelist. Specifies the last file within the list of files (when using wildcards for fname). A substring is sufficient.

    Raises:
        NotImplementedError: If opening files with unknown extensions.
    """
    # dispatch on the file extension; reset all state first so repeated calls do not accumulate data
    ext = fname.rsplit(".",1)[-1]
    if ext == "3ds":
        self.__init__()
        self._load_spectroscopy_3ds(fname,long_output)
    elif ext == "dat":
        self.__init__()
        self._load_spectroscopy_dat(fname,long_output,first_file,last_file)
    else:
        raise NotImplementedError("Error: Unknown file type \"%s\"." % ext)
    self.filename = fname
def _load_spectroscopy_3ds(self,fname,long_output):
    """Load spectroscopy data from a Nanonis binary .3ds file.

    First parses the ASCII header (up to ':HEADER_END:') into self.headers,
    then unpacks the big-endian binary block into one structured numpy array
    per grid point (stored in self.data_points) and builds self.data.

    Args:
        fname (string): File name to open.
        long_output (boolean): Specifies whether to give detailed output.
    """
    # read the ASCII header part; on python 3 ignore decoding errors from stray non-utf8 bytes
    if PYTHON_VERSION>=3:
        f = open(fname, encoding='utf-8', errors='ignore')
    else:
        f = open(fname)
    line = f.readline()
    while line:
        linestr = line.strip()
        if linestr.upper() == ':HEADER_END:': break
        line = f.readline()  # read ahead; 'linestr' still holds the line currently being processed
        if not '=' in linestr: continue  # some line that does not contain a key-value pair
        key, val = linestr.split('=',1)
        key, val = key.strip(), val.strip()
        key = self._string_simplify1(key)
        key_ = self._string_simplify2(key)  # further-simplified key, used only for the comparisons below
        val = val.replace('"', '')
        # sometimes keys are used more than once (probably not in binary files, but I have seen it in the .dat files), just append an underline in this case
        while key in self.headers: key = key + "_"
        # convert known header values to their proper types
        if key_ == 'grid_dim':
            vals = val.split('x')
            self.headers[key] = [int(x) for x in vals]
        elif key_ =='grid_settings':
            vals = val.split(';')
            self.headers[key] = [float(x) for x in vals]
        elif key_ in ['fixed_parameters', 'experiment_parameters', 'channels']:
            val = self._string_simplify1(val)
            vals = val.split(';')
            self.headers[key] = vals
        elif key_ in ['sweep_signal']:
            val = self._string_simplify1(val)
            self.headers[key] = val
        elif key_ == '#_parameters_(4_byte)':
            self.headers['num_parameters'] = self.headers[key] = int(val)
        elif key_ == 'experiment_size_(bytes)':
            self.headers['experiment_size'] = self.headers[key] = int(val)
        elif key_ == 'points':
            self.headers[key] = int(val)
        elif key_ in ['current_(a)','input_4_(v)','lix_1_omega_(a)','liy_1_omega_(a)','delay_before_measuring_(s)']:
            self.headers[key] = float(val)
        else:
            self.headers[key] = val
    # convenience copies of the most important header entries
    self.num_data_points = self.headers['num_data_points'] = self.headers['grid_dim'][0] * self.headers['grid_dim'][1]
    self.channels = self.headers['channels']
    self.points = self.headers['points']
    f.close()

    # re-open in binary mode and skip past the header marker to the data block
    f = open(fname, 'rb')
    bindata = f.read()
    f.close()
    bindata = bindata[bindata.index(b':HEADER_END:')+len(':HEADER_END:')+2:]  # "+2" is for the newline
    exp_size = self.headers['experiment_size'] + self.headers['num_parameters']*4;  # bytes per grid point (parameter block + channel sweeps)
    for i_point in range(self.num_data_points):
        offset_point = exp_size * i_point
        data_list = []  # list of the data, will be converted to structured numpy array at the end
        data_list_names = []  # the corresponding names
        data_headers = {}
        # per-point parameters: one big-endian float32 each, in header-declared order
        for i, par in enumerate(self.headers['fixed_parameters']+self.headers['experiment_parameters']):
            par_key = self._string_simplify1(par)
            while par_key in data_headers: par_key = par_key + "_"  # in case some names are used twice
            data_headers[par_key] = struct.unpack('>f',bindata[offset_point + i*4:offset_point + i*4 + 4])  # read header values in big endian mode
        # per-point channel sweeps: 'points' big-endian float32 values per channel
        for i_channel,channel in enumerate(self.headers['channels']):
            offset_binheaders = self.headers['num_parameters'] * 4
            channel_start, channel_end = offset_point + offset_binheaders + i_channel*self.headers['points']*4, offset_point + offset_binheaders + (i_channel+1)*self.headers['points']*4
            #arr = np.fromfile(bindata[channel_start:channel_end], dtype = '>f4') # dont know why this did not work, I think f4 for numpy might be different than f4 for struct, but didn't test
            arr = struct.unpack('>%sf' % self.points, bindata[channel_start:channel_end])
            data_list.append(arr)
            #data_list_names.append(channel.replace(' ','_').lower())
            data_list_names.append(channel)
        # create structured numpy array
        data_list_formats = ['float'] * len(data_list_names)
        data = np.array(data_list)
        data = np.core.records.fromarrays(data, names=data_list_names, formats=data_list_formats)
        #data = data.transpose()
        self.data_points.append({'data_headers': data_headers, 'data': data})
    # generate 3D numpy array for faster access and slicing
    self.data = self._generate_3D_data(self.data_points)
def _load_spectroscopy_dat(self,fnames,long_output,first_file=None,last_file=None):
    """Load spectroscopy data from a set of ASCII .dat files (one file per point).

    Args:
        fnames (string): File name to open (wildcards can be used as specified in the python function glob.glob, e.g. "spectroscopy_4_*.dat").
        long_output (boolean): Specifies whether to give detailed output.
        first_file (string): Used to further restrict the filelist. Specifies the first file within the list of files (when using wildcards for fname). A substring is sufficient.
        last_file (string): Used to further restrict the filelist. Specifies the last file within the list of files (when using wildcards for fname). A substring is sufficient.
    """
    read_somedata = False
    if first_file:
        first_file_found=False  # only defined when a first_file restriction is active
    files_read = 0
    for i_file,fname in enumerate(glob.glob(fnames)):
        # first_file and last_file limits: skip files until first_file matches (substring),
        # stop after the file matching last_file has been read (see bottom of loop)
        if first_file and not first_file_found:
            if first_file in fname:
                first_file_found=True
            else:
                continue
        if long_output: print("loading %s" % fname)
        f = open(fname)
        data = {}
        data_headers = {}
        # parse the tab-separated header part (up to the '[DATA]' marker)
        line = f.readline()
        while line:
            linestr = line.strip()
            if linestr.upper() == '[DATA]': break
            line = f.readline()  # read ahead; 'linestr' still holds the line currently being processed
            if not '\t' in linestr: continue  # some line that does not contain a key-value pair
            key, val = linestr.split('\t',1)
            key, val = key.strip(), val.strip()
            key = self._string_simplify1(key)
            key_ = self._string_simplify2(key)
            val = val.replace('"', '')
            # sometimes keys are used more than once, just append an underline in this case
            while key in data_headers: key = key + "_"
            # known numeric header fields are converted to float; everything else stays a string
            if key_ in ['x_(m)', 'y_(m)', 'z_(m)','current_(a)','final_Z_(m)','input_4_(v)','lix_1_omega_(a)','liy_1_omega_(a)','z_offset_(m)', 'z_sweep_distance_(m)', 'settling_time_(s)', 'integration_time_(s)', 'final_z_(m)', 'temperature_afm_(v)', 'applied_voltage_measured_(v)', 'bias_(v)', 'phase_(deg)', 'amplitude_(m)', 'frequency_shift_(hz)', 'excitation_(v)']:
            	data_headers[key] = float(val)
            else:
            	data_headers[key] = val
        read_somedata = True
        names = f.readline().strip().split("\t")  # get field names myself (the numpy.genfromtxt gets rid of the "(" and ")" and also does not transform to lowercase)
        names = [self._string_simplify1(n) for n in names]
        f_data = str.encode(f.read())
        data = np.genfromtxt(io.BytesIO(f_data), names=names)
        data.dtype.names = names  # the genfromtxt gets rid of the "(" and ")"
        if files_read>0:
            # all files must share the same channel layout and sweep length as the first one
            if self.channels != data.dtype.names or self.points != data.shape[0]:
                print("Warning: change in data structure in file %s. Stopping to read data." % fname)
                break
        else:
            # first file defines the channel layout for the whole grid
            self.headers['channels'] = self.channels = data.dtype.names
            self.headers['points'] = self.points = data.shape[0]
        self.data_points.append({'data_headers': data_headers, 'data': data})
        files_read += 1
        f.close()
        if last_file:
            if last_file in fname:
                break
    if not read_somedata:
        print('Error: no data read.')
        return False
    # important info for the headers
    self.num_data_points = self.headers['num_data_points'] = len(self.data_points)
    # generate 3D numpy array for faster access and slicing
    self.data = self._generate_3D_data(self.data_points)
    print("%s files read." % (files_read))
def _generate_3D_data(self,data_points):
"""Generates a 3D numpy array holding all the data.
Args:
data_points: List of data_points (that include the channel data).
Returns:
3d numpy arrau: containing all the data for each point in one numpy array.
"""
data3D = np.zeros((len(data_points), len(self.channels), self.points))
for i_data_point in range(len(data_points)):
for i_channel in range(len(self.channels)):
data3D[i_data_point,i_channel,:] = data_points[i_data_point]['data'][self.channels[i_channel]]
return data3D
def fit_KPFM(self, x_limit=[]):
    """fits the KPFM parabolas and adds 'fit_type' and 'fit_coeffs' to the data_headers.

    Using the optional parameter x_limit a range can be defined where the fit is done (in x-axis units).
    Also extracts 'V*', 'df*', as well as the 'fit_sse' (sum of squares due to error), and 'fit_r2' (the r squared value). If x_limit is specified, also fit_sse_fullrange and fit_r2_fullrange will be calculated to represent the values for the full x range.
    These will be added to the data_header for each point.
    Also adds the amplitude mean and standard deviation ('amplitude_mean_(m)' and 'amplitude_stddev_(m)') - if the data exists. Further the fitted line and the residuals are added to the data sweeps."""
    # NOTE(review): mutable default argument x_limit=[] — safe here because it is never mutated
    # some sanity checks
    p0names = self.data_points[0]['data'].dtype.names
    if (not 'bias_[avg]_(v)' in p0names and not 'bias_(v)' in p0names):
        print("Error: Bias channel not found in the data. Cannot fit KPFM")
        return False
    if (not 'frequency_shift_[avg]_(hz)' in p0names and not 'frequency_shift_(hz)' in p0names):
        print("Error: Frequency shift channel not found in the data. Cannot fit KPFM.")
        return False
    for i,p in enumerate(self.data_points):
        # x axis: bias sweep; prefer the averaged channel if present
        if 'bias_[avg]_(v)' in p['data'].dtype.names:
            x = p['data']['bias_[avg]_(v)']
            if not 'bias_(v)' in p['data'].dtype.names:
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'bias_(v)',p['data']['bias_[avg]_(v)'])  # just so that it will be easier to plot from the outside
        else:
            x = p['data']['bias_(v)']
        # y axis: frequency shift; prefer the averaged channel if present
        if 'frequency_shift_[avg]_(hz)' in p['data'].dtype.names:
            y = p['data']['frequency_shift_[avg]_(hz)']
            if not 'frequency_shift_(hz)' in p['data'].dtype.names:
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'frequency_shift_(hz)',p['data']['frequency_shift_[avg]_(hz)'])  # just so that it will be easier to plot from the outside
        else:
            y = p['data']['frequency_shift_(hz)']
        # oscillation amplitude statistics (optional channel)
        if 'amplitude_[avg]_(m)' in p['data'].dtype.names:
            self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_[avg]_(m)'])
            self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_[avg]_(m)'])
        elif 'amplitude_(m)' in p['data'].dtype.names:
            self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_(m)'])
            self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_(m)'])
        if len(x_limit)==2:  # set limit for fits
            # map the x-axis limits to the closest sample indices, normalized to ascending order
            x_limit_i = [ (np.abs(x-x_limit[0])).argmin() , (np.abs(x-x_limit[1])).argmin()]
            if x_limit_i[0] > x_limit_i[1]:
                x_limit_i[0], x_limit_i[1] = x_limit_i[1], x_limit_i[0]
            self.data_points[i]['data_headers']['fit_x_limit_i_start'] = x[x_limit_i[0]]
            self.data_points[i]['data_headers']['fit_x_limit_i_end'] = x[x_limit_i[1]]
            x_fit = x[x_limit_i[0]:x_limit_i[1]+1]
            y_fit = y[x_limit_i[0]:x_limit_i[1]+1]
        else:
            x_fit = x
            y_fit = y
        # second-order polynomial (parabola) fit over the selected range
        coeffs = np.polyfit(x_fit,y_fit,2)
        f = np.poly1d(coeffs)
        # goodness-of-fit statistics over the full sweep range
        yhat = f(x)
        ybar = np.sum(y)/len(y)
        ssreg = np.sum((yhat-ybar)**2)
        sstot = np.sum((y - ybar)**2)
        sserr = np.sum((y - yhat)**2)
        # goodness-of-fit statistics over the (possibly limited) fit range
        yhat_fit = f(x_fit)
        ybar_fit = np.sum(y_fit)/len(y_fit)
        ssreg_fit = np.sum((yhat_fit-ybar_fit)**2)
        sstot_fit = np.sum((y_fit - ybar_fit)**2)
        sserr_fit = np.sum((y_fit - yhat_fit)**2)
        # store the fitted curve and residuals (overwrite in place if the fields already exist)
        if 'fit_frequency_shift_(hz)' in self.data_points[i]['data'].dtype.names:
            self.data_points[i]['data']['fit_frequency_shift_(hz)'] = yhat
            self.data_points[i]['data']['fit_res_frequency_shift_(hz)'] = y - yhat  # residuals
        else:
            self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_frequency_shift_(hz)', yhat)
            self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_res_frequency_shift_(hz)', y - yhat)  # residuals
        self.data_points[i]['data_headers']['fit_type'] = "KPFM"
        self.data_points[i]['data_headers']['fit_coeffs'] = coeffs
        self.data_points[i]['data_headers']['fit_r2'] = ssreg_fit / sstot_fit
        self.data_points[i]['data_headers']['fit_sse'] = sserr_fit
        self.data_points[i]['data_headers']['fit_r2_fullrange'] = ssreg / sstot  # _fullrange takes the full range for error calculation (i.e. not taking into account x_limit)
        self.data_points[i]['data_headers']['fit_sse_fullrange'] = sserr
        # parabola vertex: V* (x position of the extremum) and df* (y value at the extremum)
        self.data_points[i]['data_headers']['v*_(v)'] = -coeffs[1]/(2*coeffs[0])
        self.data_points[i]['data_headers']['df*_(hz)'] = coeffs[2] - coeffs[1]**2/(4*coeffs[0])
def fit_IZ(self, oscillation_correction=False, x_limit=[]):
    """fits the IZ curves (line fit to log|I| vs z) and adds 'fit_type' and 'fit_coeffs' to the data_headers.

    Also extracts the work function 'phi', as well as the 'fit_sse' (sum of squares due to error), and 'fit_r2' (the r squared value).
    Using the optional parameter x_limit a range can be defined where the fit is done (in x-axis units). If x_limit is specified, also fit_sse_fullrange and fit_r2_fullrange will be calculated to represent the values for the full x range.
    These will be added to the data_header for each point.
    Also adds the amplitude mean and standard deviation ('amplitude_mean_(m)' and 'amplitude_stddev_(m)') - if the data exists.
    Further the fitted line and the residuals are added to the data sweeps.
    If the parameter 'oscillation_correction' is set to True, another fit will be computed taking into account the z-oscillation (sensor amplitude).
    The fit then is a Bessel function of the first kind: I = I0 * exp(-2kz) * J0(2kA)
    """
    # NOTE(review): mutable default argument x_limit=[] — safe here because it is never mutated
    # some sanity checks
    p0names = self.data_points[0]['data'].dtype.names
    if (not 'z_[avg]_(m)' in p0names and not 'z_(m)' in p0names):
        print("Error: Z channel not found in the data. Cannot fit IZ.")
        return False
    # bug fix: channel names are lower-cased on load, so test '(a)' (was 'current_[avg]_(A)'
    # with a capital A, which could never match and wrongly aborted the fit)
    if (not 'current_[avg]_(a)' in p0names and not 'current_(a)' in p0names):
        print("Error: Current channel not found in the data. Cannot fit IZ.")
        return False
    # bug fix: the sensor amplitude channel is in meters ('amplitude_..._(m)'), not amperes
    # (was '..._(A)'/'..._(a)'); the wrong names made this check useless and could leave
    # 'amplitude' unbound in the oscillation-correction branch below
    if oscillation_correction and (not 'amplitude_[avg]_(m)' in p0names and not 'amplitude_(m)' in p0names):
        print("Error: Amplitude channel not found in the data. Cannot fit IZ with oscillation correction.")
        return False
    for i,p in enumerate(self.data_points):
        # x axis: z sweep; prefer the averaged channel if present
        if 'z_[avg]_(m)' in p['data'].dtype.names:
            x = p['data']['z_[avg]_(m)']
            if not 'z_(m)' in p['data'].dtype.names:
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'z_(m)',p['data']['z_[avg]_(m)'])  # just so that it will be easier to plot from the outside
        else:
            x = p['data']['z_(m)']
        # y axis: tunneling current; prefer the averaged channel if present
        if 'current_[avg]_(a)' in p['data'].dtype.names:
            y_current = p['data']['current_[avg]_(a)']
            if not 'current_(a)' in p['data'].dtype.names:
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'current_(a)',p['data']['current_[avg]_(a)'])  # just so that it will be easier to plot from the outside
        else:
            y_current = p['data']['current_(a)']
        # oscillation amplitude statistics; 'amplitude' is also needed for oscillation_correction
        if 'amplitude_[avg]_(m)' in p['data'].dtype.names:
            self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_[avg]_(m)'])
            self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_[avg]_(m)'])
            amplitude = p['data']['amplitude_[avg]_(m)']
        elif 'amplitude_(m)' in p['data'].dtype.names:
            self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_(m)'])
            self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_(m)'])
            amplitude = p['data']['amplitude_(m)']
        y = np.log(np.abs(y_current))  # linearize the exponential decay
        if len(x_limit)==2:  # set limit for fits
            # map the x-axis limits to the closest sample indices, normalized to ascending order
            x_limit_i = [ (np.abs(x-x_limit[0])).argmin() , (np.abs(x-x_limit[1])).argmin()]
            if x_limit_i[0] > x_limit_i[1]:
                x_limit_i[0], x_limit_i[1] = x_limit_i[1], x_limit_i[0]
            self.data_points[i]['data_headers']['fit_x_limit_i_start'] = x[x_limit_i[0]]
            self.data_points[i]['data_headers']['fit_x_limit_i_end'] = x[x_limit_i[1]]
            x_fit = x[x_limit_i[0]:x_limit_i[1]+1]
            y_fit = y[x_limit_i[0]:x_limit_i[1]+1]
        else:
            x_fit = x
            y_fit = y
        # linear fit of log-current vs z over the selected range
        coeffs = np.polyfit(x_fit,y_fit,1)
        f = np.poly1d(coeffs)
        # goodness-of-fit statistics over the full sweep range
        yhat = f(x)
        ybar = np.sum(y)/len(y)
        ssreg = np.sum((yhat-ybar)**2)
        sstot = np.sum((y - ybar)**2)
        sserr = np.sum((y - yhat)**2)
        # goodness-of-fit statistics over the (possibly limited) fit range
        yhat_fit = f(x_fit)
        ybar_fit = np.sum(y_fit)/len(y_fit)
        ssreg_fit = np.sum((yhat_fit-ybar_fit)**2)
        sstot_fit = np.sum((y_fit - ybar_fit)**2)
        sserr_fit = np.sum((y_fit - yhat_fit)**2)
        # back-transform the fitted line to current units
        yhat_exp = np.exp(yhat)
        y_current_bar = np.sum(y_current)/len(y_current)
        if y_current_bar < 0: yhat_exp = -yhat_exp  # account for the sign of the current
        # store the fitted curves and residuals (overwrite in place if the fields already exist)
        if 'fit_current_(a)' in self.data_points[i]['data'].dtype.names:
            self.data_points[i]['data']['log_current_(a)'] = y
            self.data_points[i]['data']['fit_log_current_(a)'] = yhat
            self.data_points[i]['data']['fit_res_log_current_(a)'] = y - yhat  # residuals
            self.data_points[i]['data']['fit_current_(a)'] = yhat_exp
            self.data_points[i]['data']['fit_res_current_(a)'] = y_current - yhat_exp  # residuals
        else:
            self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'log_current_(a)', y)
            self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_log_current_(a)', yhat)
            self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_res_log_current_(a)', y - yhat)  # residuals
            self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_current_(a)', yhat_exp)
            self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_res_current_(a)', y_current - yhat_exp)  # residuals
        self.data_points[i]['data_headers']['fit_type'] = "IZ"
        self.data_points[i]['data_headers']['fit_coeffs'] = coeffs
        self.data_points[i]['data_headers']['fit_r2'] = ssreg_fit / sstot_fit
        self.data_points[i]['data_headers']['fit_sse'] = sserr_fit
        self.data_points[i]['data_headers']['fit_r2_fullrange'] = ssreg / sstot  # _fullrange takes the full range for error calculation (i.e. not taking into account x_limit)
        self.data_points[i]['data_headers']['fit_sse_fullrange'] = sserr
        # work function from the decay constant: phi = (hbar*kappa)^2 / (2*m_e), with slope = -2*kappa
        self.data_points[i]['data_headers']['phi_(j)'] = coeffs[0]**2 * (PLANCK_CONSTANT/(2*np.pi))**2 / (8 * MASS_ELECTRON)
        self.data_points[i]['data_headers']['phi_(ev)'] = self.data_points[i]['data_headers']['phi_(j)'] * CONV_J_EV
        if oscillation_correction:
            def log_curent_bessel(xx, k, logI0):
                """Model: log(I) = log(I0) + log(J0(-k*A)) + k*z ; jv(0, x) is the Bessel function of the first kind of order 0; -2*kappa = k."""
                z = xx[0][:,0]
                A = xx[0][:,1]
                return logI0 + np.log(scipy.special.jv(0, -k*A)) + k*z
            coeffs2, pcov = scipy.optimize.curve_fit(f=log_curent_bessel, xdata=np.dstack([x,amplitude]), ydata=y, p0=[coeffs[0], coeffs[1]], maxfev=1000)  # p0 are initial values which are taken from the previous fit
            yhat2 = log_curent_bessel(np.dstack([x,amplitude]), coeffs2[0], coeffs2[1])
            ssreg2 = np.sum((yhat2-ybar)**2)
            sserr2 = np.sum((y - yhat2)**2)
            yhat_exp2 = np.exp(yhat2)
            if y_current_bar < 0: yhat_exp2 = -yhat_exp2  # account for the sign of the current
            # store the corrected fit under 'fit2_*' names
            if 'fit2_current_(a)' in self.data_points[i]['data'].dtype.names:
                self.data_points[i]['data']['fit2_log_current_(a)'] = yhat2
                self.data_points[i]['data']['fit2_res_log_current_(a)'] = y - yhat2  # residuals
                self.data_points[i]['data']['fit2_current_(a)'] = yhat_exp2
                self.data_points[i]['data']['fit2_res_current_(a)'] = y_current - yhat_exp2  # residuals
            else:
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit2_log_current_(a)', yhat2)
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit2_res_log_current_(a)', y - yhat2)  # residuals
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit2_current_(a)', yhat_exp2)
                self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit2_res_current_(a)', y_current - yhat_exp2)  # residuals
            self.data_points[i]['data_headers']['fit2_type'] = "IZosc"
            self.data_points[i]['data_headers']['fit2_coeffs'] = coeffs2
            self.data_points[i]['data_headers']['fit2_r2'] = ssreg2 / sstot
            self.data_points[i]['data_headers']['fit2_sse'] = sserr2
            self.data_points[i]['data_headers']['phi2_(j)'] = coeffs2[0]**2 * (PLANCK_CONSTANT/(2*np.pi))**2 / (8 * MASS_ELECTRON)
            self.data_points[i]['data_headers']['phi2_(ev)'] = self.data_points[i]['data_headers']['phi2_(j)'] * CONV_J_EV
def fit_IZosc(self):
"""shorthand for fit_IZ(oscillation_correction=True)"""
if 'amplitude_[avg]_(m)' in self.data_points[0]['data'].dtype.names or 'amplitude_(m)' in self.data_points[0]['data'].dtype.names:
self.fit_IZ(oscillation_correction=True)
else:
print('Error: point-per point amplitude information not found, but is necessary for the IZ fit with oscillation correction.')
def fit_Gaussian(self, x_channel='bias_(v)', y_channel='current_(a)', num_peaks=1, initial_params=None, initial_params_bounds=(-np.inf, np.inf), extra_offset=False, x_limit=[], **kwargs):
"""Fits Gaussian curves into a data set.
Uses scipy.optimize.curve_fit. Adds 'fit_type' and 'fit_coeffs' to the data_headers.
Parameters:
- x_channel: name of channel to use for x-axis
- y_channel: name of channel to use for y-axis
- num_peaks: number of Gaussian peaks to use for fitting
- initial_params: list of parameters corresponding to (A, xpos, sigma) of each peak. Last parameter is y-offset, which is only used if extra_offset is True
- initial_params_bounds: list of lower and upper bounds for parameters to fit
- extra_offset: specifies whether to add an extra constant offset
- x_limit: tuple with lower and upper x-limit, specifies whether to restrict the fitting for a range of x_values
- other kwargs: will be passed to scipy.optimize.curve_fit
Also extracts the 'fit_sse' (sum of squares due to error), and 'fit_r2' (the r squared value). If x_limit is specified, also fit_sse_fullrange and fit_r2_fullrange will be calculated to represent the values for the full x range.
These will be added to the data_header for each point.
Also adds the amplitude mean and standard deviation ('amplitude_mean_(m)' and 'amplitude_stddev_(m)') - if the data exists. Further the fitted line and the residuals are added to the data sweeps."""
def gaussian(x,A,xpos,sigma):
return A*np.exp(-((x-xpos)**2/(2*sigma**2)))
def y_calc(x, *params):
#peak_params = np.array(params).reshape([-1,3])
y_sum = np.zeros_like(x)
"""calculate y for a sum of gaussian peaks and an additional offset"""
for i in range(len(params) // 3): # floor division
y_sum += gaussian(x, params[3*i+0], params[3*i+1], params[3*i+2])
return y_sum
def y_calc_offset(x, *params):
return y_calc(x, *params[:-1]) + params[-1]
# some sanity checks
p0names = self.data_points[0]['data'].dtype.names
for ch in [x_channel, y_channel]:
if not ch in p0names:
print("Error: Channel '%s' not found in the data." % ch)
return False
num_extra_params = 0
if extra_offset: num_extra_params = 1
if len(initial_params) != (num_peaks * 3) + num_extra_params:
print("Error: Parameter initial_params does not have the right shape. Should be: %s instead of %s." % ((num_peaks * 3) + num_extra_params, len(initial_params)))
return False
if len(initial_params_bounds) != (num_peaks * 3) + num_extra_params and len(initial_params_bounds)!=2:
print("Error: Parameter initial_params_bounds does not have the right shape. Should be: %s instead of %s." % ((num_peaks * 3) + num_extra_params, len(initial_params_bounds)))
return False
for i,p in enumerate(self.data_points):
x = p['data'][x_channel]
y = p['data'][y_channel]
if 'amplitude_[avg]_(m)' in p['data'].dtype.names:
self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_[avg]_(m)'])
self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_[avg]_(m)'])
elif 'amplitude_(m)' in p['data'].dtype.names:
self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_(m)'])
self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_(m)'])
if len(x_limit)==2: # set limit for fits
x_limit_i = [ (np.abs(x-x_limit[0])).argmin() , (np.abs(x-x_limit[1])).argmin()]
if x_limit_i[0] > x_limit_i[1]:
x_limit_i[0], x_limit_i[1] = x_limit_i[1], x_limit_i[0]
self.data_points[i]['data_headers']['fit_x_limit_i_start'] = x[x_limit_i[0]]
self.data_points[i]['data_headers']['fit_x_limit_i_end'] = x[x_limit_i[1]]
x_fit = x[x_limit_i[0]:x_limit_i[1]+1]
y_fit = y[x_limit_i[0]:x_limit_i[1]+1]
else:
x_fit = x
y_fit = y
try:
if extra_offset:
popt, pcov = scipy.optimize.curve_fit(y_calc_offset, x_fit, y_fit, p0=initial_params, bounds=initial_params_bounds, **kwargs)
else:
popt, pcov = scipy.optimize.curve_fit(y_calc, x_fit, y_fit, p0=initial_params, bounds=initial_params_bounds, **kwargs)
except RuntimeError:
popt = initial_params
print("Error in curve_fit for point %d." % i)
if extra_offset:
yhat = y_calc_offset(x, *popt)
yhat_fit = y_calc_offset(x_fit, *popt)
else:
yhat = y_calc(x, *popt)
yhat_fit = y_calc(x_fit, *popt)
ybar = np.sum(y)/len(y)
ssreg = np.sum((yhat-ybar)**2)
sstot = np.sum((y - ybar)**2)
sserr = np.sum((y - yhat)**2)
ybar_fit = np.sum(y_fit)/len(y_fit)
ssreg_fit = np.sum((yhat_fit-ybar_fit)**2)
sstot_fit = np.sum((y_fit - ybar_fit)**2)
sserr_fit = np.sum((y_fit - yhat_fit)**2)
if 'fit_%s' % y_channel in self.data_points[i]['data'].dtype.names:
self.data_points[i]['data']['fit_%s' % y_channel] = yhat
self.data_points[i]['data']['fit_res_%s' % y_channel] = y - yhat # residuals
else:
self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_%s' % y_channel, yhat)
self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_res_%s' % y_channel, y - yhat) # residuals
self.data_points[i]['data_headers']['fit_type'] = "Gaussian"
self.data_points[i]['data_headers']['fit_x_channel'] = x_channel
self.data_points[i]['data_headers']['fit_y_channel'] = y_channel
self.data_points[i]['data_headers']['fitted_y_channel'] = 'fit_%s' % y_channel
self.data_points[i]['data_headers']['fit_res_y_channel'] = 'fit_res_%s' % y_channel
self.data_points[i]['data_headers']['fit_coeffs'] = popt
self.data_points[i]['data_headers']['fit_r2'] = ssreg_fit / sstot_fit
self.data_points[i]['data_headers']['fit_sse'] = sserr_fit
self.data_points[i]['data_headers']['fit_r2_fullrange'] = ssreg / sstot # _fullrange takes the full range for error calculation (i.e. not taking into account x_limit)
self.data_points[i]['data_headers']['fit_sse_fullrange'] = sserr
for j in range(num_peaks):
self.data_points[i]['data_headers']['A_%d' % j] = popt[j*3+0]
self.data_points[i]['data_headers']['xpos_%d' % j] = popt[j*3+1]
self.data_points[i]['data_headers']['sigma_%d' % j] = popt[j*3+2]
def fit_arbitrary(self, x_channel='bias_(v)', y_channel='current_(a)', f=None, params=None, deg=1, x_limit=[], **kwargs):
"""Fits arbitrary curves for eahc point in the data set.
Uses scipy.optimize.curve_fit. Adds 'fit_type' and 'fit_coeffs' to the data_headers.
Args:
x_channel (string): name of channel to use for x-axis.
y_channel (string): name of channel to use for y-axis.
f (function): Any fit function that can be passed to scipy.optimize.curve_fit. If None, then a linear polyfit will be used (the argument deg should be specified then).
params (list): Parameters to use in the fit function.
deg (integerer): Degree of the fitting polynomial.
x_limit (tuple): Tuple with lower and upper x-limit, specifies whether to restrict the fitting for a range of x_values.
**kwargs: Extra keyword arguments to be passed to scipy.optimize.curve_fit or numpy.polyfit.
Also extracts the 'fit_sse' (sum of squares due to error), and 'fit_r2' (the r squared value). If x_limit is specified, also fit_sse_fullrange and fit_r2_fullrange will be calculated to represent the values for the full x range.
These will be added to the data_header for each point.
Also adds the amplitude mean and standard deviation ('amplitude_mean_(m)' and 'amplitude_stddev_(m)') - if the data exists. Further the fitted line and the residuals are added to the data sweeps.
"""
# some sanity checks
p0names = self.data_points[0]['data'].dtype.names
for ch in [x_channel, y_channel]:
if not ch in p0names:
print("Error: Channel '%s' not found in the data." % ch)
return False
for i,p in enumerate(self.data_points):
x = p['data'][x_channel]
y = p['data'][y_channel]
if 'amplitude_[avg]_(m)' in p['data'].dtype.names:
self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_[avg]_(m)'])
self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_[avg]_(m)'])
elif 'amplitude_(m)' in p['data'].dtype.names:
self.data_points[i]['data_headers']['amplitude_mean_(m)'] = np.mean(p['data']['amplitude_(m)'])
self.data_points[i]['data_headers']['amplitude_stddev_(m)'] = np.std(p['data']['amplitude_(m)'])
if len(x_limit)==2: # set limit for fits
x_limit_i = [ (np.abs(x-x_limit[0])).argmin() , (np.abs(x-x_limit[1])).argmin()]
if x_limit_i[0] > x_limit_i[1]:
x_limit_i[0], x_limit_i[1] = x_limit_i[1], x_limit_i[0]
self.data_points[i]['data_headers']['fit_x_limit_i_start'] = x[x_limit_i[0]]
self.data_points[i]['data_headers']['fit_x_limit_i_end'] = x[x_limit_i[1]]
x_fit = x[x_limit_i[0]:x_limit_i[1]+1]
y_fit = y[x_limit_i[0]:x_limit_i[1]+1]
else:
x_fit = x
y_fit = y
if f == None:
popt = np.polyfit(x_fit, y_fit, deg=deg)
flin = np.poly1d(popt)
yhat = flin(x)
yhat_fit = flin(x_fit)
self.data_points[i]['data_headers']['fit_type'] = 'linear_degree_%s' % deg
else:
try:
popt, pcov = scipy.optimize.curve_fit(f, x_fit, y_fit, **kwargs)
except RuntimeError:
popt = initial_params
print("Error in curve_fit for point %d." % i)
yhat = f(x, *popt)
yhat_fit = f(x_fit, *popt)
self.data_points[i]['data_headers']['fit_type'] = f.__name__
ybar = np.sum(y)/len(y)
ssreg = np.sum((yhat-ybar)**2)
sstot = np.sum((y - ybar)**2)
sserr = np.sum((y - yhat)**2)
ybar_fit = np.sum(y_fit)/len(y_fit)
ssreg_fit = np.sum((yhat_fit-ybar_fit)**2)
sstot_fit = np.sum((y_fit - ybar_fit)**2)
sserr_fit = np.sum((y_fit - yhat_fit)**2)
if 'fit_%s' % y_channel in self.data_points[i]['data'].dtype.names:
self.data_points[i]['data']['fit_%s' % y_channel] = yhat
self.data_points[i]['data']['fit_res_%s' % y_channel] = y - yhat # residuals
else:
self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_%s' % y_channel, yhat)
self.data_points[i]['data'] = recfunctions.append_fields(self.data_points[i]['data'],'fit_res_%s' % y_channel, y - yhat) # residuals
self.data_points[i]['data_headers']['fit_x_channel'] = x_channel
self.data_points[i]['data_headers']['fit_y_channel'] = y_channel
self.data_points[i]['data_headers']['fitted_y_channel'] = 'fit_%s' % y_channel
self.data_points[i]['data_headers']['fit_res_y_channel'] = 'fit_res_%s' % y_channel
self.data_points[i]['data_headers']['fit_coeffs'] = popt
self.data_points[i]['data_headers']['fit_r2'] = ssreg_fit / sstot_fit
self.data_points[i]['data_headers']['fit_sse'] = sserr_fit
self.data_points[i]['data_headers']['fit_r2_fullrange'] = ssreg / sstot # _fullrange takes the full range for error calculation (i.e. not taking into account x_limit)
self.data_points[i]['data_headers']['fit_sse_fullrange'] = sserr
def _string_simplify1(self,str):
"""simplifies a string (i.e. removes replaces space for "_", and makes it lowercase"""
return str.replace(' ','_').lower()
def _string_simplify2(self,str):
"""simplifies a string (i.e. removes replaces space for "_", and makes it lowercase"""
return str.replace(' ','_').lower()
class PlotData(object):
"""Class to read grid date from binary or ascii files. Also fits the KPFM parabola and the exponential IZ curves. """
def __init__(self, griddata):
"""we provide a griddate object as input"""
# populate from an GridData object
griddata_ = copy.deepcopy(griddata) # we want copies, so that we do not alter the original griddata object
self.filename = griddata_.filename
self.headers = griddata_.headers
self.data_points = griddata_.data_points
self.num_data_points = griddata_.num_data_points
self.channels = griddata_.channels
self.points = griddata_.points
self.data = griddata_.data
self.im_extent = () # x and y span of image (x_min, x_max, y_min, y_max)
self.im_delta = (1,1) # spacing between points
self.grid_dim = () # grid dimensions in pixels
self.grid_center = () # center coordinates for grid
self.grid_extent = () # grid span in x and y direction
self.grid_rotation = -1 # grid rotation
self.axis_length = False # true if we have physical length info for the data
self.test = [] # for testing events
self.fig = None
self.gs = None # gridspec
self.plots = [] # grid plots
self.plots2D = [] # 2D graphs
self.plots2D2 = [] # 2D graphs for 2nd graph
self.plot_markers = [] # markers on grid plots
self.selected_index = 0 # selected index to plot 2D plots
self.plot_legends = [] # legends for 2D graph
self.plot_sweep_signal = None # channel that is swept
self.plot_xaxis = None # xaxis channel
self.plot_sweep_channels_selected = []
self.plot_sweep_channel_res = None # channel for residuals
def _length_to_indices(self,coords_index):
"""converts length coordinates to index coordinates"""
return (int(coords_index[0]/self.im_delta[0]), int(coords_index[1]/self.im_delta[1]))
def _index_to_length(self,coords_length):
"""converts index coordinates to length coordinates"""
return (coords_length[0]*self.im_delta[0], coords_length[1]*self.im_delta[1])
def _indices_to_index(self,coords_index):
"""converts pixel indices to data point index"""
return coords_index[1] * self.grid_dim[0] + coords_index[0]
def _index_to_indices(self,index):
"""converts data point index to pixel indices"""
return (index % self.grid_dim[0], int(np.floor(index/self.grid_dim[0])))
def _indices_check_bounds(self, coords_index):
"""checks and adjust bounds of pixel coordinates"""
x = min(coords_index[0],self.grid_dim[0])
x = max(coords_index[0],0)
y = min(coords_index[1],self.grid_dim[1])
y = max(coords_index[1],0)
return (x,y)
def _point_distance(self, p1, p2, absolute=True):
"""calculates distance between two points. If absolute is True, then the absolute value will be returned."""
d = np.sqrt((p2['data_headers']['x_(m)']-p1['data_headers']['x_(m)'])**2 + (p2['data_headers']['y_(m)']-p1['data_headers']['y_(m)'])**2)
if absolute:
return abs(d)
else:
return d
    def _calc_grid_info(self):
        """Computes grid information (dimensions, extent, center, rotation).

        Uses the 'grid_settings' header when present; otherwise tries to
        reconstruct the geometry from the distances between data points.
        Returns a list of warning strings (empty if nothing suspicious).
        """
        warn = []
        self.axis_length = False
        if 'grid_settings' in self.headers:
            # geometry is stated explicitly in the file header
            self.grid_center = (self.headers['grid_settings'][0],self.headers['grid_settings'][1])
            self.grid_extent = (self.headers['grid_settings'][2],self.headers['grid_settings'][3])
            self.grid_rotation = self.headers['grid_settings'][4]
            self.grid_dim = self.headers['grid_dim']
            self.im_extent = (0,self.grid_extent[0]*1e9,0,self.grid_extent[1]*1e9) # in nanometers
            self.im_delta = ((self.im_extent[1]-self.im_extent[0])/self.grid_dim[0], (self.im_extent[3]-self.im_extent[2])/self.grid_dim[1])
            self.axis_length = True
        else:
            self.im_delta = (1,1) # for points. we will try to extract the length later
            # analyze length data
            warn.append("grid_unknown")
            # NOTE(review): assumes at least two data points exist
            dx = self._point_distance(self.data_points[0],self.data_points[1])
            # initial values, should be changed in the loop below
            n_sqrt = int(np.sqrt(len(self.data_points)))
            nx, ny = n_sqrt, n_sqrt
            dy = self._point_distance(self.data_points[ny],self.data_points[0])
            # scan consecutive points: the first change in step size marks the end of a row (nx)
            for i in range(len(self.data_points[:-1])):
                d = self._point_distance(self.data_points[i],self.data_points[i+1])
                if abs(d-dx) > 1e-12: # looks like there is a dy step now
                    nx = i+1
                    dy = self._point_distance(self.data_points[nx],self.data_points[0])
                    ny = int(np.ceil(len(self.data_points)/nx))
                    break
            self.grid_dim = (nx, ny)
            self.im_extent = (0, nx*dx*1e9, 0, ny*dy*1e9) # estimating the extent of the image, convert to nanometers
            self.im_delta = (dx*1e9, dy*1e9) # converting to nm
            self.grid_center = (self.data_points[0]['data_headers']['x_(m)'] + nx/2 * dx, self.data_points[0]['data_headers']['y_(m)'] + ny/2 * dy)
            dx_p1p0 = self.data_points[1]['data_headers']['x_(m)']-self.data_points[0]['data_headers']['x_(m)'] # x-difference between point 1 and point 0
            dy_p1p0 = self.data_points[1]['data_headers']['y_(m)']-self.data_points[0]['data_headers']['y_(m)'] # y-difference between point 1 and point 0
            self.grid_rotation = (np.arctan2(dy_p1p0, dx_p1p0) * 180 / np.pi + 360) % 360 # the "+360%360" ensures that the angle is always positive
            self.axis_length = True
            if (nx * ny != len(self.data_points)): # reconstructed grid does not account for all points
                warn.append("grid_unknown_problem")
        return warn
    def _widgets_update_vars(self):
        """Updates class variables from the current GUI widget settings."""
        self.selected_index = self.slider_point.value - 1  # slider is 1-based, index is 0-based
        self.plot_sweep_channels_selected = self.select_channels.value
        self.plot_xaxis = self.select_xaxis.value
        # second 2D graph; NOTE(review): these two attributes are not initialized in __init__
        self.plot_sweep_channels_selected2 = self.select_channels2.value
        self.plot_xaxis2 = self.select_xaxis2.value
def _widgets_grid(self,name=None,state=False):
"""toggle grid in plots"""
if not name: # no name and no state given, so we switch
self.button_grid.value = not self.button_grid.value
state = self.button_grid.value
for p in self.plots:
p.grid(state)
if PYTHON_VERSION==2: plt.draw()
def _widgets_legend(self,name=None,state=False):
"""toggle legends in plots"""
if not name: # no name and no state given, so we switch
self.button_legend.value = not self.button_legend.value
state = self.button_legend.value
for legend in self.plot_legends:
legend.set_visible(state)
if PYTHON_VERSION==2: plt.draw()
def _slider_change(self):
"""when slider changes, update markers and plot"""
self.selected_index = self.slider_point.value - 1
self._widgets_change()
def _widgets_change_marker(self):
"""when marker display changes"""
self._widgets_marker()
    def _widgets_change(self):
        """When a widget changes, update markers and plot.

        Syncs the slider to the selected index, refreshes variables, then
        redraws markers (deferred) and the sweep plot.
        """
        self.slider_point.value = self.selected_index + 1  # slider is 1-based
        self._widgets_update_vars()
        self._widgets_marker(redraw=False)  # redraw deferred; _widgets_plot_change triggers it
        self._widgets_plot_change()
    def _widgets_plot_change(self):
        """Handles widget changes that affect the sweep plot: refreshes variables and re-plots."""
        self._widgets_update_vars()
        self.plot_sweep_data()
        if PYTHON_VERSION==2: plt.draw()  # Python 2 / non-notebook matplotlib needs an explicit redraw
def _widgets_marker(self, redraw=True):
"""toggle marker in plots"""
x,y = self._index_to_length(self._index_to_indices(self.slider_point.value-1))
w,h = self._index_to_length((1,1))
for i in reversed(range(len(self.plot_markers))):
self.plot_markers[i].remove()
del self.plot_markers[i]
if self.button_marker.value:
for p in self.plots:
self.plot_markers.append(p.add_patch(matplotlib.patches.Rectangle((x, y), w, h, fill=False, snap=False, edgecolor='#900000', linewidth=1)))
if self.button_marker_circle.value:
for p in self.plots:
w2 = w*2
self.plot_markers.append(p.add_patch(matplotlib.patches.Circle((x+w/2,y+h/2), w2, fill=True, snap=False, facecolor='#f00000',linewidth=1, edgecolor='#900000', alpha=0.33)))
if PYTHON_VERSION==2 and redraw: plt.draw()
def _plot_onclick(self,event):
"""onclick event for plot"""
if event.xdata == None or event.ydata == None: return False
if self.axis_length:
pixel_coord = self._length_to_indices((event.xdata, event.ydata)) # convert to pixel index
else:
pixel_coord = (int(event.xdata), int(event.ydata)) # x and y coordinates are point indices
pixel_coord = self._indices_check_bounds(pixel_coord) # check if we are out of range, and adjust if necessary
self.selected_index = self._indices_to_index(pixel_coord)
self._widgets_change()
def _plot_onkeypress(self,event):
"""changes selected points according to the cursor arrows"""
if event.key == "right":
new_index = self.selected_index + 1
elif event.key == "left":
new_index = self.selected_index - 1
elif event.key == "up":
new_index = self.selected_index + self.grid_dim[0]
elif event.key == "down":
new_index = self.selected_index - self.grid_dim[0]
elif event.key == "m":
self.button_marker.value = not self.button_marker.value
self._widgets_marker()
return
elif event.key == "M":
self.button_marker_circle.value = not self.button_marker_circle.value
self._widgets_marker()
return
elif event.key == "i":
self._widgets_legend()
return
elif event.key == "g":
self._widgets_grid()
return
else: