; Copyright 2001-2017 - Mersenne Research, Inc. All rights reserved
; Author: George Woltman
; Email: [email protected]
;
; These macros implement 64-bit SSE2 optimized versions of macros found
; in hg.mac. We make use of the 8 extra XMM registers (xmm8-xmm15)
; available in 64-bit mode.
xfive_reals_fft_preload MACRO
xload xmm8, XMM_P618 ;; (.588/.951)
xload xmm9, XMM_P309 ;; .309
xload xmm10, XMM_P951 ;; .951
xload xmm11, XMM_P588 ;; .588
xload xmm12, XMM_M809 ;; -.809
xload xmm13, XMM_M262 ;; (-.809/.309)
xload xmm14, XMM_M162 ;; (-.951/.588)
xload xmm15, XMM_M382 ;; (.309/-.809)
ENDM
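; The constants loaded above are the length-5 twiddle factors and their
; ratios (inferred from the names and values):
; XMM_P309 = cos(2*pi/5) = .309 XMM_P951 = sin(2*pi/5) = .951
; XMM_M809 = cos(4*pi/5) = -.809 XMM_P588 = sin(4*pi/5) = .588
; The ratio registers let x5r_fft step a running product from one
; coefficient to the next with a single mulpd instead of reloading the
; operand, e.g. x*.309 * (-.809/.309) = x*-.809.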
x5r_fft MACRO r1, r2, r3, r4, r5, t1, t2, t3
xcopy t1, r5 ;; 0-5 Copy R5
addpd r5, r2 ;; 1-4 T1 = R2 + R5
xcopy t2, r4 ;; 2-7 Copy R4
addpd r4, r3 ;; 3-5 T2 = R3 + R4
xcopy t3, r1 ;; 4-9 newR2 = R1
subpd r2, t1 ;; 6-9 T3 = R2 - R5
xcopy t1, r1 ;; 7-12 newR3 = R1
subpd r3, t2 ;; 8-11 T4 = R3 - R4
xcopy t2, xmm8 ;; 9-14 const (.588/.951)
addpd r1, r5 ;; 10-13 newR1 = R1 + T1
mulpd r5, xmm9 ;; 11-16 T1 = T1 * .309
mulpd r2, xmm10 ;; 13-18 T3 = T3 * .951 (new I2)
addpd r1, r4 ;; 14-17 newR1 = newR1 + T2
mulpd r3, xmm11 ;; 15-20 T4 = T4 * .588
addpd t3, r5 ;; 17-20 newR2 = newR2 + T1
mulpd r4, xmm12 ;; 18-23 T2 = T2 * -.809
mulpd r5, xmm13 ;; 20-25 T1 = T1 * (-.809/.309)
mulpd t2, r2 ;; 22-27 T3 = T3 * (.588/.951)
addpd r2, r3 ;; 23-26 newI2 = newI2 + T4
mulpd r3, xmm14 ;; 24-29 T4 = T4 * (-.951/.588)
addpd t3, r4 ;; 25-28 newR2 = newR2 + T2
mulpd r4, xmm15 ;; 26-31 T2 = T2 * (.309/-.809)
addpd t1, r5 ;; 27-30 newR3 = newR3 + T1
addpd t2, r3 ;; 30-33 T3 = T3 + T4 (final I3)
addpd t1, r4 ;; 32-35 newR3 = newR3 + T2
ENDM
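; For reference, x5r_fft computes a length-5 real-input DFT. A minimal C
; sketch of the same math (illustrative annotation only, not part of the
; build; dft5_real is a made-up name and the constants are assumed from
; the XMM_* names):
;
; void dft5_real(double r1, double r2, double r3, double r4, double r5,
; double out[5])
; {
; const double c1 = 0.309016994, s1 = 0.951056516; /* cos,sin 2*pi/5 */
; const double c2 = -0.809016994, s2 = 0.587785252; /* cos,sin 4*pi/5 */
; double t1 = r2 + r5, t2 = r3 + r4; /* symmetric sums */
; double u1 = r2 - r5, u2 = r3 - r4; /* antisymmetric differences */
; out[0] = r1 + t1 + t2; /* newR1 (DC term) */
; out[1] = r1 + c1*t1 + c2*t2; /* newR2 */
; out[2] = s1*u1 + s2*u2; /* newI2 */
; out[3] = r1 + c2*t1 + c1*t2; /* newR3 */
; out[4] = s2*u1 - s1*u2; /* final I3 */
; }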
xfive_reals_unfft_preload MACRO
xload xmm8, XMM_P309 ;; Load .309
xload xmm9, XMM_M809 ;; Load -.809
xload xmm10, XMM_P951 ;; Load 0.951
xload xmm11, XMM_P588 ;; Load 0.588
ENDM
x5r_unfft MACRO r1, r2, r3, r4, r5, t1, t2, t3, mem1
xcopy t1, xmm8 ;; Load .309
mulpd t1, r2 ;; 1-6 R2*.309
xcopy t2, xmm9 ;; Load -.809
mulpd t2, r2 ;; 3-8 R2*-.809
addpd r2, r4 ;; 4-7 R2+R3
xcopy t3, xmm9 ;; Load -.809
mulpd t3, r4 ;; 5-10 R3*-.809
addpd r2, r1 ;; 6-9 R1+R2+R3 (final R1)
mulpd r4, xmm8 ;; 7-12 R3*.309
addpd t1, r1 ;; 8-11 R1 + R2*.309
xstore mem1, r2 ;; Save final R1
xcopy r2, xmm10 ;; Load 0.951
mulpd r2, r3 ;; 9-14 I2*.951
addpd t2, r1 ;; 10-13 R1 + R2*-.809
xcopy r1, xmm11 ;; Load 0.588
mulpd r1, r5 ;; 11-16 I3*.588
addpd t1, t3 ;; 12-15 R1 + R2*.309 - R3*.809
mulpd r3, xmm11 ;; 13-18 I2*.588
addpd t2, r4 ;; 14-17 R1 - R2*.809 + R3*.309
mulpd r5, xmm10 ;; 15-20 I3*.951
xcopy t3, t1 ;; 16-21 R1 + R2*.309 - R3*.809
addpd r2, r1 ;; 17-20 I2*.951 + I3*.588
xcopy r4, t2 ;; 18-23 R1 - R2*.809 + R3*.309
subpd r3, r5 ;; 21-24 I2*.588 - I3*.951
addpd t1, r2 ;; 23-26 final R2
subpd t3, r2 ;; 25-28 final R5
addpd t2, r3 ;; 27-30 final R3
subpd r4, r3 ;; 29-31 final R4
ENDM
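; x4c_fft below applies twiddles stored as a (cos/sin, sin) pair:
; [screg+off+16] holds cosine/sine and [screg+off] holds sine (per the
; inline comments). Factoring the complex multiply this way,
; A = (R * (cos/sin) - I) * sin = R*cos - I*sin (real part)
; B = (I * (cos/sin) + R) * sin = I*cos + R*sin (imaginary part)
; lets the add/subtract issue between the two dependent multiplies rather
; than after two independent products.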
x4c_fft MACRO r1, r2, r3, r4, r5, r6, r7, r8, mem8, screg, off, pre1, pre2, dst1, dst2
xload r8, [screg+off+32+16] ;; cosine/sine
mulpd r8, r3 ;; A3 = R3 * cosine/sine ;1-6
subpd r8, r7 ;; A3 = A3 - I3 ;8-11
mulpd r7, [screg+off+32+16] ;; B3 = I3 * cosine/sine ;3-8
addpd r7, r3 ;; B3 = B3 + R3 ;10-13
xload r3, [screg+off+0+16] ;; cosine/sine
mulpd r3, r2 ;; A2 = R2 * cosine/sine ;5-10
subpd r3, r6 ;; A2 = A2 - I2 ;12-15
mulpd r6, [screg+off+0+16] ;; B2 = I2 * cosine/sine ;9-14
addpd r6, r2 ;; B2 = B2 + R2 ;16-19
xload r2, [screg+off+64+16] ;; cosine/sine
mulpd r2, mem8 ;; B4 = I4 * cosine/sine ;11-16
addpd r2, r4 ;; B4 = B4 + R4 ;18-21
mulpd r4, [screg+off+64+16] ;; A4 = R4 * cosine/sine ;7-12
subpd r4, mem8 ;; A4 = A4 - I4 ;14-17
mulpd r8, [screg+off+32] ;; A3 = A3 * sine (new R3) ;13-18
xcopy xmm8, r1
mulpd r7, [screg+off+32] ;; B3 = B3 * sine (new I3) ;15-20
xcopy xmm9, r5
mulpd r3, [screg+off+0] ;; A2 = A2 * sine (new R2) ;17-22
mulpd r4, [screg+off+64] ;; A4 = A4 * sine (new R4) ;19-24
xprefetchw [pre1]
subpd r1, r8 ;; R1 = R1 - R3 (new R3) ;20-23
mulpd r6, [screg+off+0] ;; B2 = B2 * sine (new I2) ;21-26
subpd r5, r7 ;; I1 = I1 - I3 (new I3) ;22-25
mulpd r2, [screg+off+64] ;; B4 = B4 * sine (new I4) ;23-28
xprefetchw [pre1][pre2]
addpd r8, xmm8 ;; R3 = R1 + R3 (new R1) ;24-27
xcopy xmm8, r3
subpd r3, r4 ;; R2 = R2 - R4 (new R4) ;26-29
addpd r7, xmm9 ;; I3 = I1 + I3 (new I1) ;28-31
xcopy xmm9, r6
subpd r6, r2 ;; I2 = I2 - I4 (new I4) ;30-33
xcopy xmm10, r5
subpd r5, r3 ;; I3 = I3 - R4 (final I4) ;32-35
xcopy xmm11, r1
addpd r4, xmm8 ;; R4 = R2 + R4 (new R2) ;34-37
addpd r2, xmm9 ;; I4 = I2 + I4 (new I2) ;36-39
IFNB <dst1>
xstore dst1, r5
ENDIF
subpd r1, r6 ;; R3 = R3 - I4 (final R3) ;38-41
xcopy xmm8, r8
subpd r8, r4 ;; R1 = R1 - R2 (final R2) ;40-43
xcopy xmm9, r7
subpd r7, r2 ;; I1 = I1 - I2 (final I2) ;42-45
IFNB <dst2>
xstore dst2, r1
ENDIF
addpd r3, xmm10 ;; R4 = I3 + R4 (final I3) ;44-47
addpd r6, xmm11 ;; I4 = R3 + I4 (final R4) ;46-49
addpd r4, xmm8 ;; R2 = R1 + R2 (final R1) ;48-51
addpd r2, xmm9 ;; I2 = I1 + I2 (final I1) ;50-53
ENDM
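; After the twiddle multiplies, x4c_fft finishes with the standard radix-4
; combine. A C sketch of that final step (illustrative only; the cplx type
; and names are made up):
;
; typedef struct { double re, im; } cplx;
; /* inside some function: a = input 1 (no twiddle applied);
; b,c,d = the twiddled inputs 2..4 */
; cplx acp = { a.re+c.re, a.im+c.im }, acm = { a.re-c.re, a.im-c.im };
; cplx bdp = { b.re+d.re, b.im+d.im }, bdm = { b.re-d.re, b.im-d.im };
; cplx out1 = { acp.re+bdp.re, acp.im+bdp.im }; /* final R1/I1 */
; cplx out2 = { acp.re-bdp.re, acp.im-bdp.im }; /* final R2/I2 */
; cplx out3 = { acm.re-bdm.im, acm.im+bdm.re }; /* acm + i*bdm */
; cplx out4 = { acm.re+bdm.im, acm.im-bdm.re }; /* acm - i*bdm */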
x4c_fft_mem MACRO R1,R2,R3,R4,R5,R6,R7,R8,screg,off,pre1,pre2,dst1,dst2
xload xmm0, R3 ;; R3
xload xmm1, [screg+off+32+16] ;; cosine/sine
mulpd xmm1, xmm0 ;; A3 = R3 * cosine/sine ;1-6
xload xmm2, R7 ;; I3
xload xmm3, [screg+off+32+16] ;; cosine/sine
mulpd xmm3, xmm2 ;; B3 = I3 * cosine/sine ;3-8
xload xmm4, R2 ;; R2
xload xmm6, [screg+off+0+16] ;; cosine/sine
mulpd xmm4, xmm6 ;; A2 = R2 * cosine/sine ;5-10
xload xmm5, R4 ;; R4
xload xmm7, [screg+off+64+16] ;; cosine/sine
mulpd xmm5, xmm7 ;; A4 = R4 * cosine/sine ;7-12
subpd xmm1, xmm2 ;; A3 = A3 - I3 ;8-11
xload xmm2, R6 ;; I2
mulpd xmm6, xmm2 ;; B2 = I2 * cosine/sine ;9-14
addpd xmm3, xmm0 ;; B3 = B3 + R3 ;10-13
xload xmm0, R8 ;; I4
mulpd xmm7, xmm0 ;; B4 = I4 * cosine/sine ;11-16
subpd xmm4, xmm2 ;; A2 = A2 - I2 ;12-15
xload xmm2, [screg+off+32] ;; sine
mulpd xmm1, xmm2 ;; A3 = A3 * sine (new R3) ;13-18
subpd xmm5, xmm0 ;; A4 = A4 - I4 ;14-17
mulpd xmm3, xmm2 ;; B3 = B3 * sine (new I3) ;15-20
addpd xmm6, R2 ;; B2 = B2 + R2 ;16-19
xload xmm0, [screg+off+0] ;; sine
mulpd xmm4, xmm0 ;; A2 = A2 * sine (new R2) ;17-22
xprefetchw [pre1]
addpd xmm7, R4 ;; B4 = B4 + R4 ;18-21
mulpd xmm5, [screg+off+64] ;; A4 = A4 * sine (new R4) ;19-24
xload xmm2, R1 ;; R1
subpd xmm2, xmm1 ;; R1 = R1 - R3 (new R3) ;20-23
mulpd xmm6, xmm0 ;; B2 = B2 * sine (new I2) ;21-26
xload xmm0, R5 ;; I1
subpd xmm0, xmm3 ;; I1 = I1 - I3 (new I3) ;22-25
mulpd xmm7, [screg+off+64] ;; B4 = B4 * sine (new I4) ;23-28
addpd xmm1, R1 ;; R3 = R1 + R3 (new R1) ;24-27
xprefetchw [pre1][pre2]
xcopy xmm8, xmm4
subpd xmm4, xmm5 ;; R2 = R2 - R4 (new R4) ;26-29
xcopy xmm9, xmm6
addpd xmm3, R5 ;; I3 = I1 + I3 (new I1) ;28-31
subpd xmm6, xmm7 ;; I2 = I2 - I4 (new I4) ;30-33
xcopy xmm10, xmm0
subpd xmm0, xmm4 ;; I3 = I3 - R4 (final I4) ;32-35
addpd xmm5, xmm8 ;; R4 = R2 + R4 (new R2) ;34-37
addpd xmm7, xmm9 ;; I4 = I2 + I4 (new I2) ;36-39
IFNB <dst1>
xstore dst1, xmm0
ENDIF
xcopy xmm8, xmm2
subpd xmm2, xmm6 ;; R3 = R3 - I4 (final R3) ;38-41
xcopy xmm9, xmm1
subpd xmm1, xmm5 ;; R1 = R1 - R2 (final R2) ;40-43
xcopy xmm11, xmm3
subpd xmm3, xmm7 ;; I1 = I1 - I2 (final I2) ;42-45
IFNB <dst2>
xstore dst2, xmm2
ENDIF
addpd xmm4, xmm10 ;; R4 = I3 + R4 (final I3) ;44-47
addpd xmm6, xmm8 ;; I4 = R3 + I4 (final R4) ;46-49
addpd xmm5, xmm9 ;; R2 = R1 + R2 (final R1) ;48-51
addpd xmm7, xmm11 ;; I2 = I1 + I2 (final I1) ;50-53
ENDM
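; x4c_fft_mem is the same dataflow as x4c_fft with the inputs taken from
; memory operands (the R1..R8 arguments here are addresses); it stays
; within xmm0-xmm11, leaving xmm12-xmm15 free for the caller.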
x8r_fft MACRO
xcopy xmm8, xmm3
subpd xmm3, xmm7 ;; new R8 = R4 - R8
addpd xmm7, xmm8 ;; new R4 = R4 + R8
xcopy xmm9, xmm1
subpd xmm1, xmm5 ;; new R6 = R2 - R6
addpd xmm5, xmm9 ;; new R2 = R2 + R6
mulpd xmm3, XMM_SQRTHALF ;; R8 = R8 * square root
mulpd xmm1, XMM_SQRTHALF ;; R6 = R6 * square root
xcopy xmm8, xmm0
subpd xmm0, xmm4 ;; new R5 = R1 - R5
addpd xmm4, xmm8 ;; new R1 = R1 + R5
xcopy xmm9, xmm5
subpd xmm5, xmm7 ;; R2 = R2 - R4 (new & final R4)
xcopy xmm10, xmm2
subpd xmm2, xmm6 ;; new R7 = R3 - R7
addpd xmm6, xmm10 ;; new R3 = R3 + R7
xcopy xmm8, xmm1
subpd xmm1, xmm3 ;; R6 = R6 - R8 (Real part)
xcopy xmm10, xmm4
subpd xmm4, xmm6 ;; R1 = R1 - R3 (new & final R3)
addpd xmm7, xmm9 ;; R4 = R2 + R4 (new R2)
addpd xmm3, xmm8 ;; R8 = R6 + R8 (Imaginary part)
xcopy xmm8, xmm0
subpd xmm0, xmm1 ;; R5 = R5 - R6 (final R7)
addpd xmm6, xmm10 ;; R3 = R1 + R3 (new R1)
xcopy xmm9, xmm2
subpd xmm2, xmm3 ;; R7 = R7 - R8 (final R8)
xcopy xmm10, xmm6
subpd xmm6, xmm7 ;; R1 = R1 - R2 (final R2)
addpd xmm1, xmm8 ;; R6 = R5 + R6 (final R5)
addpd xmm3, xmm9 ;; R8 = R7 + R8 (final R6)
addpd xmm7, xmm10 ;; R2 = R1 + R2 (final R1)
ENDM
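; XMM_SQRTHALF is sqrt(1/2) = .7071. Scaling the difference terms R2-R6
; and R4-R8 by it before combining them into the "Real part" and
; "Imaginary part" is consistent with the 45-degree twiddle
; (1 +/- i)/sqrt(2) that a length-8 transform of real data requires.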
x8r_unfft MACRO r1, r2, r3, r4, r5, r6, r7, r8
xcopy xmm8, r6
subpd r6, r8 ;; new R8 = R6 - R8 ;1-4
addpd r8, xmm8 ;; new R7 = R6 + R8 ;3-6
xcopy xmm9, r5
subpd r5, r7 ;; new R6 = R5 - R7 ;5-8
addpd r7, xmm9 ;; new R5 = R5 + R7 ;7-10
xcopy xmm10, r1
subpd r1, r2 ;; new R2 = R1 - R2 ;9-12
addpd r2, xmm10 ;; new R1 = R1 + R2 ;11-14
xcopy xmm8, r6
subpd r6, r5 ;; R8 = R8 - R6 ;13-16
addpd r5, xmm8 ;; R6 = R6 + R8 ;15-18
xcopy xmm9, r1
subpd r1, r4 ;; R2 = R2 - R4 (new R4) ;17-20
mulpd r6, XMM_SQRTHALF ;; R8 = R8 * square root of 1/2 ;18-23
mulpd r5, XMM_SQRTHALF ;; R6 = R6 * square root of 1/2 ;22-27
xcopy xmm10, r2
subpd r2, r3 ;; R1 = R1 - R3 (new R3) ;19-22
addpd r4, xmm9 ;; R4 = R2 + R4 (new R2) ;27-30
xcopy xmm8, r1
subpd r1, r6 ;; newR4 = newR4 - newR8 (final R8) ;
addpd r3, xmm10 ;; R3 = R1 + R3 (new R1) ;
xcopy xmm9, r2
subpd r2, r8 ;; R3 = R3 - R7 (final R7) ;
xcopy xmm10, r4
subpd r4, r5 ;; R2 = R2 - R6 (final R6) ;
xcopy xmm11, r3
subpd r3, r7 ;; R1 = R1 - R5 (final R5) ;
addpd r6, xmm8 ;; R8 = R4 + R8 (final R4) ;
addpd r8, xmm9 ;; R7 = R3 + R7 (final R3) ;
addpd r5, xmm10 ;; R6 = R2 + R6 (final R2) ;
addpd r7, xmm11 ;; R5 = R1 + R5 (final R1) ;
ENDM
; This works but doesn't seem to be any faster on a P4.
;
; R1 = R1 + R2 + R3 + R4 + R5 + R6 + R7
; R2 = R1 + (R2+R7)*.623 + (R3+R6)*-.223 + (R4+R5)*-.901
; R3 = R1 + (R2+R7)*-.223 + (R3+R6)*-.901 + (R4+R5)*.623
; R4 = R1 + (R2+R7)*-.901 + (R3+R6)*.623 + (R4+R5)*-.223
; I2 = (R2-R7)*.782 + (R3-R6)*.975 + (R4-R5)*.434
; I3 = (R2-R7)*.975 + (R3-R6)*-.434 + (R4-R5)*-.782
; I4 = (R2-R7)*.434 + (R3-R6)*-.782 + (R4-R5)*.975
; For compatibility with the 8-register version, store results in:
; destr1 = final R1
; destr4 = final R4
; xmm0 = final R3
; xmm4 = final R2
; xmm3 = final I4
; xmm2 = final I2
; xmm1 = final I3
; Temporaries:
; xmm5 = R2+R7 intermediate values
; xmm6 = R3+R6 intermediate values
; xmm7 = R4+R5 intermediate values
; xmm8 = R2-R7 intermediate values
; xmm9 = R3-R6 intermediate values
; xmm10 = R4-R5 intermediate values
; xmm11 = final R1
; xmm12 = final R4
; xmm13-xmm15 = cached multipliers
;
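; The decimal constants are the length-7 twiddles (inferred from their
; values): .623 = cos(2*pi/7), -.223 = cos(4*pi/7), -.901 = cos(6*pi/7),
; .782 = sin(2*pi/7), .975 = sin(4*pi/7), .434 = sin(6*pi/7). As in the
; radix-5 code, the ratio constants (XMM_M358 = -.223/.623, XMM_P404 =
; -.901/-.223, XMM_P445 = .434/.975, XMM_P180 = .782/.434) step a running
; product to the next coefficient with one mulpd.
;
; A minimal C sketch of the equations above (illustrative only; dft7_real
; is a made-up name):
;
; void dft7_real(const double x[7], double out[7])
; {
; const double c1 = .6234898, c2 = -.2225209, c3 = -.9009689;
; const double s1 = .7818315, s2 = .9749279, s3 = .4338837;
; double t1 = x[1]+x[6], t2 = x[2]+x[5], t3 = x[3]+x[4];
; double u1 = x[1]-x[6], u2 = x[2]-x[5], u3 = x[3]-x[4];
; out[0] = x[0] + t1 + t2 + t3; /* R1 */
; out[1] = x[0] + c1*t1 + c2*t2 + c3*t3; /* R2 */
; out[2] = x[0] + c2*t1 + c3*t2 + c1*t3; /* R3 */
; out[3] = x[0] + c3*t1 + c1*t2 + c2*t3; /* R4 */
; out[4] = s1*u1 + s2*u2 + s3*u3; /* I2 */
; out[5] = s2*u1 - s3*u2 - s1*u3; /* I3 */
; out[6] = s3*u1 - s1*u2 + s2*u3; /* I4 */
; }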
x7r_fft_mem MACRO m4, m5, m6, m7, destr1, destr4, m4_conflict
xload xmm11, m7 ;; T1 = R7
addpd xmm11, xmm1 ;; T1 = R2+R7
xload xmm14, m6 ;; T2 = R6
addpd xmm14, xmm2 ;; T2 = R3+R6
xload xmm15, m4 ;; T3 = R4
addpd xmm15, m5 ;; T3 = R4+R5
xload xmm5, XMM_P623
mulpd xmm5, xmm11 ;; T1 = T1 * .623
subpd xmm1, m7 ;; S1 = R2-R7
xload xmm6, XMM_P623
mulpd xmm6, xmm14 ;; T2 = T2 * .623
subpd xmm2, m6 ;; S2 = R3-R6
xload xmm7, XMM_P623
mulpd xmm7, xmm15 ;; T3 = T3 * .623
xload xmm3, m4 ;; S3 = R4
subpd xmm3, m5 ;; S3 = R4-R5
xload xmm13, XMM_P975
mulpd xmm1, xmm13 ;; S1 = S1 * .975, newI3=S1
addpd xmm11, xmm0 ;; R1+T1
mulpd xmm2, xmm13 ;; S2 = S2 * .975, newI2=S2
addpd xmm15, xmm14 ;; T2+T3
mulpd xmm3, xmm13 ;; S3 = S3 * .975, newI4=S3
xload xmm14, XMM_M358
xcopy xmm4, xmm0
addpd xmm4, xmm5 ;; newR2 = R1 + T1
mulpd xmm5, xmm14 ;; T1 = T1 * (-.223/.623)
xcopy xmm12, xmm0
addpd xmm12, xmm6 ;; newR4 = R1 + T2
mulpd xmm6, xmm14 ;; T2 = T2 * (-.223/.623)
addpd xmm0, xmm7 ;; newR3 = R1 + T3
mulpd xmm7, xmm14 ;; T3 = T3 * (-.223/.623)
xload xmm8, XMM_P445 ;; (.434/.975)
mulpd xmm8, xmm1 ;; S1 = S1 * (.434/.975)
xload xmm9, XMM_P445 ;; (.434/.975)
mulpd xmm9, xmm2 ;; S2 = S2 * (.434/.975)
xload xmm10, XMM_P445 ;; (.434/.975)
mulpd xmm10, xmm3 ;; S3 = S3 * (.434/.975)
addpd xmm0, xmm5 ;; newR3 = newR3 + T1
addpd xmm4, xmm6 ;; newR2 = newR2 + T2
addpd xmm12, xmm7 ;; newR4 = newR4 + T3
addpd xmm11, xmm15 ;; R1+T1+T2+T3 (final R1)
xload xmm15, XMM_P404
addpd xmm3, xmm8 ;; newI4 = newI4 + S1
mulpd xmm5, xmm15 ;; T1 = T1 * (-.901/-.223)
subpd xmm1, xmm9 ;; newI3 = newI3 - S2
mulpd xmm6, xmm15 ;; T2 = T2 * (-.901/-.223)
addpd xmm2, xmm10 ;; newI2 = newI2 + S3
mulpd xmm7, xmm15 ;; T3 = T3 * (-.901/-.223)
xload xmm13, XMM_P180
mulpd xmm8, xmm13 ;; S1 = S1 * (.782/.434)
addpd xmm12, xmm5 ;; newR4 = newR4 + T1 (final R4)
mulpd xmm9, xmm13 ;; S2 = S2 * (.782/.434)
addpd xmm0, xmm6 ;; newR3 = newR3 + T2 (final R3)
mulpd xmm10, xmm13 ;; S3 = S3 * (.782/.434)
addpd xmm4, xmm7 ;; newR2 = newR2 + T3 (final R2)
xstore destr1, xmm11 ;; Save R1
xstore destr4, xmm12 ;; Save R4
addpd xmm2, xmm8 ;; newI2 = newI2 + S1 (final I2)
subpd xmm3, xmm9 ;; newI4 = newI4 - S2 (final I4)
subpd xmm1, xmm10 ;; newI3 = newI3 - S3 (final I3)
ENDM
x7r_fft MACRO r1, r2, r3, r4, r5, r6, r7, t1, memr1
xcopy xmm8, r2
subpd r2, r7 ;; R2-R7
xcopy xmm9, r3
subpd r3, r6 ;; R3-R6
xcopy xmm10, r4
subpd r4, r5 ;; R4-R5
addpd r7, xmm8 ;; T1 = R2+R7
addpd r6, xmm9 ;; T2 = R3+R6
addpd r5, xmm10 ;; T3 = R4+R5
xcopy t1, r1 ;; R1
addpd t1, r7 ;; R1+T1
addpd t1, r6 ;; R1+T1+T2
addpd t1, r5 ;; R1+T1+T2+T3 (final R1)
xstore memr1, t1
mulpd r7, XMM_P623 ;; T1 = T1 * .623
mulpd r6, XMM_P623 ;; T2 = T2 * .623
mulpd r5, XMM_P623 ;; T3 = T3 * .623
xcopy xmm8, r2
xcopy r2, r1
xcopy t1, r1
addpd r1, r7 ;; newR2 = R1 + T1
addpd r2, r5 ;; newR3 = R1 + T3
addpd t1, r6 ;; newR4 = R1 + T2
mulpd r7, XMM_M358 ;; T1 = T1 * (-.223/.623)
mulpd r6, XMM_M358 ;; T2 = T2 * (-.223/.623)
mulpd r5, XMM_M358 ;; T3 = T3 * (-.223/.623)
addpd r1, r6 ;; newR2 = newR2 + T2
addpd r2, r7 ;; newR3 = newR3 + T1
addpd t1, r5 ;; newR4 = newR4 + T3
mulpd r7, XMM_P404 ;; T1 = T1 * (-.901/-.223)
mulpd r6, XMM_P404 ;; T2 = T2 * (-.901/-.223)
mulpd r5, XMM_P404 ;; T3 = T3 * (-.901/-.223)
addpd r1, r5 ;; newR2 = newR2 + T3 (final R2)
addpd r2, r6 ;; newR3 = newR3 + T2 (final R3)
addpd t1, r7 ;; newR4 = newR4 + T1 (final R4)
xcopy r7, xmm8 ;; T1 = R2-R7
mulpd r7, XMM_P975 ;; T1 = T1 * .975
mulpd r3, XMM_P975 ;; T2 = T2 * .975
mulpd r4, XMM_P975 ;; T3 = T3 * .975
xcopy xmm9, r2 ;; final R3
xcopy r2, r3 ;; newI2 = T2
xcopy r6, r7 ;; newI3 = T1
xcopy r5, r4 ;; newI4 = T3
mulpd r7, XMM_P445 ;; T1 = T1 * (.434/.975)
mulpd r3, XMM_P445 ;; T2 = T2 * (.434/.975)
mulpd r5, XMM_P445 ;; T3 = T3 * (.434/.975)
addpd r2, r5 ;; newI2 = newI2 + T3
subpd r6, r3 ;; newI3 = newI3 - T2
addpd r4, r7 ;; newI4 = newI4 + T1
mulpd r7, XMM_P180 ;; T1 = T1 * (.782/.434)
mulpd r3, XMM_P180 ;; T2 = T2 * (.782/.434)
mulpd r5, XMM_P180 ;; T3 = T3 * (.782/.434)
addpd r2, r7 ;; newI2 = newI2 + T1 (final I2)
subpd r6, r5 ;; newI3 = newI3 - T3 (final I3)
subpd r4, r3 ;; newI4 = newI4 - T2 (final I4)
xcopy r5, xmm9 ;; final R3
ENDM
x7r_unfft_mem MACRO r1, r2, r3, r4, r5, r6, r7, t1, memr3, memr7, outmemr1
xcopy t1, r1 ;; R1
addpd t1, r2 ;; R1 + R2
addpd t1, r4 ;; R1 + R2 + R3
addpd t1, r6 ;; R1 + R2 + R3 + R4 (final R1)
xstore outmemr1, t1 ;; Save final R1
xcopy r7, r1 ;; A2 = R1
xcopy t1, r1 ;; A3 = R1
mulpd r2, XMM_P623 ;; S2 = R2 * .623
mulpd r4, XMM_P623 ;; S3 = R3 * .623
mulpd r6, XMM_P623 ;; S4 = R4 * .623
addpd r7, r2 ;; A2 = A2 + S2
addpd t1, r6 ;; A3 = A3 + S4
addpd r1, r4 ;; A4 = A4 + S3
mulpd r2, XMM_M358 ;; S2 = S2 * (-.223/.623)
mulpd r4, XMM_M358 ;; S3 = S3 * (-.223/.623)
mulpd r6, XMM_M358 ;; S4 = S4 * (-.223/.623)
addpd r7, r4 ;; A2 = A2 + S3
addpd t1, r2 ;; A3 = A3 + S2
addpd r1, r6 ;; A4 = A4 + S4
mulpd r2, XMM_P404 ;; S2 = S2 * (-.901/-.223)
mulpd r4, XMM_P404 ;; S3 = S3 * (-.901/-.223)
mulpd r6, XMM_P404 ;; S4 = S4 * (-.901/-.223)
addpd r7, r6 ;; A2 = A2 + S4
addpd t1, r4 ;; A3 = A3 + S3
addpd r1, r2 ;; A4 = A4 + S2
xcopy xmm8, r7 ;; Save A2
xload r3, memr3 ;; Load I2
xload r7, memr7 ;; Load I4
mulpd r3, XMM_P975 ;; T2 = I2*.975
mulpd r5, XMM_P975 ;; T3 = I3*.975
mulpd r7, XMM_P975 ;; T4 = I4*.975
xcopy r6, r5 ;; B2 = T3
xcopy r2, r3 ;; B3 = T2
xcopy r4, r7 ;; B4 = T4
mulpd r3, XMM_P445 ;; T2 = T2 * (.434/.975)
mulpd r5, XMM_P445 ;; T3 = T3 * (.434/.975)
mulpd r7, XMM_P445 ;; T4 = T4 * (.434/.975)
addpd r6, r7 ;; B2 = B2 + T4
subpd r2, r5 ;; B3 = B3 - T3
addpd r4, r3 ;; B4 = B4 + T2
mulpd r3, XMM_P180 ;; T2 = T2 * (.782/.434)
mulpd r5, XMM_P180 ;; T3 = T3 * (.782/.434)
mulpd r7, XMM_P180 ;; T4 = T4 * (.782/.434)
addpd r6, r3 ;; B2 = B2 + T2
xcopy r3, xmm8 ;; Reload A2
subpd r2, r7 ;; B3 = B3 - T4
subpd r4, r5 ;; B4 = B4 - T3
subpd r3, r6 ;; A2 = A2 - B2 (final R7)
xcopy xmm9, t1
subpd t1, r2 ;; A3 = A3 - B3 (final R6)
xcopy xmm10, r1
subpd r1, r4 ;; A4 = A4 - B4 (final R5)
addpd r6, xmm8 ;; B2 = A2 + B2 (final R2)
addpd r2, xmm9 ;; B3 = A3 + B3 (final R3)
addpd r4, xmm10 ;; B4 = A4 + B4 (final R4)
ENDM
x7r_unfft MACRO r1, r2, r3, r4, r5, r6, r7, t1, memr1
xcopy t1, r1 ;; R1
addpd t1, r2 ;; R1 + R2
addpd t1, r4 ;; R1 + R2 + R3
addpd t1, r6 ;; R1 + R2 + R3 + R4 (final R1)
mulpd r3, XMM_P975 ;; T2 = I2*.975
mulpd r5, XMM_P975 ;; T3 = I3*.975
mulpd r7, XMM_P975 ;; T4 = I4*.975
xstore memr1, t1 ;; Save final R1
xcopy xmm8, r2 ;; Save R2
xcopy xmm9, r4 ;; Save R3
xcopy t1, r5 ;; B2 = T3
xcopy r2, r3 ;; B3 = T2
xcopy r4, r7 ;; B4 = T4
mulpd r3, XMM_P445 ;; T2 = T2 * (.434/.975)
mulpd r5, XMM_P445 ;; T3 = T3 * (.434/.975)
mulpd r7, XMM_P445 ;; T4 = T4 * (.434/.975)
addpd t1, r7 ;; B2 = B2 + T4
subpd r2, r5 ;; B3 = B3 - T3
addpd r4, r3 ;; B4 = B4 + T2
mulpd r3, XMM_P180 ;; T2 = T2 * (.782/.434)
mulpd r5, XMM_P180 ;; T3 = T3 * (.782/.434)
mulpd r7, XMM_P180 ;; T4 = T4 * (.782/.434)
addpd t1, r3 ;; B2 = B2 + T2
subpd r2, r7 ;; B3 = B3 - T4
subpd r4, r5 ;; B4 = B4 - T3
xcopy r3, xmm8 ;; Reload R2
xcopy r5, xmm9 ;; Reload R3
xcopy xmm8, t1 ;; Save B2
xcopy r7, r1 ;; A2 = R1
xcopy t1, r1 ;; A3 = R1
mulpd r3, XMM_P623 ;; S2 = R2 * .623
mulpd r5, XMM_P623 ;; S3 = R3 * .623
mulpd r6, XMM_P623 ;; S4 = R4 * .623
addpd r7, r3 ;; A2 = A2 + S2
addpd t1, r6 ;; A3 = A3 + S4
addpd r1, r5 ;; A4 = A4 + S3
mulpd r3, XMM_M358 ;; S2 = S2 * (-.223/.623)
mulpd r5, XMM_M358 ;; S3 = S3 * (-.223/.623)
mulpd r6, XMM_M358 ;; S4 = S4 * (-.223/.623)
addpd r7, r5 ;; A2 = A2 + S3
addpd t1, r3 ;; A3 = A3 + S2
addpd r1, r6 ;; A4 = A4 + S4
mulpd r3, XMM_P404 ;; S2 = S2 * (-.901/-.223)
mulpd r5, XMM_P404 ;; S3 = S3 * (-.901/-.223)
mulpd r6, XMM_P404 ;; S4 = S4 * (-.901/-.223)
addpd r7, r6 ;; A2 = A2 + S4
addpd t1, r5 ;; A3 = A3 + S3
addpd r1, r3 ;; A4 = A4 + S2
xcopy r3, xmm8 ;; Reload B2
xcopy xmm8, r7
subpd r7, r3 ;; A2 = A2 - B2 (final R7)
xcopy xmm9, t1
subpd t1, r2 ;; A3 = A3 - B3 (final R6)
xcopy xmm10, r1
subpd r1, r4 ;; A4 = A4 - B4 (final R5)
addpd r3, xmm8 ;; B2 = A2 + B2 (final R2)
addpd r2, xmm9 ;; B3 = A3 + B3 (final R3)
addpd r4, xmm10 ;; B4 = A4 + B4 (final R4)
ENDM
;; Cheat sheet for scheduling dependency chains (and num registers required)
;; 12345678901234567890123456789012345678901234567890123456789012345678901234567890
;;B3 MMMMMAAAMMMMM
;;A3 MMMMMAAAMMMMM
;;B4 MMMMMAAAMMMMM
;;B2 MMMMMAAAMMMMM
;;A4 MMMMMAAAMMMMM
;;A2 MMMMMAAAMMMMM to 12 back to 6
;;nxt B3 MMMMMAAAMMMMM
;;nxt A3 MMMMMAAAMMMMM to 10
;;mR3(depA3) AAA +1
;;mR1(depA3) AAA +1
;;mI3(depB3) AAA
;;mI1(depB3) AAA
;;mI4(depB2B4) AAA
;;mR4(depA2A4) AAA
;;mI2(depB2B4) AAA
;;mR2(depA2A4) AAA
;;i4(depmI3mR4) AAA
;;r3(depmR3mI4) AAA
;;r1(depmR1mR2) AAA
;;r2(depmR1mR2) AAA
;;i3(depmI3mR4) AAA
;;r4(depmR3mI4) AAA 6 can be stored
;;nxt A2 MMMMMAAAMMMMM
;;nxt B2 MMMMMAAAMMMMM
;;nxt A4 MMMMMAAAMMMMM
;;nxt B4 MMMMMAAAMMMMM to 12 back to 6
;;i2(depmI1mI2) AAA
;;i1(depmI1mI2) AAA 2 storeable
;;nxt mR3(depA3) AAA +1
;;nxt mR1(depA3) AAA +1
;;nxt mI3(depB3) AAA
;;nxt mI1(depB3) AAA
;;nxt mI4(depB2B4) AAA
;;nxt mR4(depA2A4) AAA
;;nxt mI2(depB2B4) AAA
;;nxt mR2(depA2A4) AAA
;;nxt i4(depmI3mR4) AAA
;;nxt r3(depmR3mI4) AAA
;;nxt r1(depmR1mR2) AAA
;;nxt r2(depmR1mR2) AAA
;;nxt i3(depmI3mR4) AAA
;;nxt r4(depmR3mI4) AAA
;;nxt i2(depmI1mI2) AAA
;;nxt i1(depmI1mI2) AAA
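;; (Reading the chart: each column appears to be one clock; an M marks a
;; cycle a mulpd result is still in flight, an A a cycle an addpd is, so a
;; MMMMMAAAMMMMM row is a multiply-add-multiply dependency chain. The
;; "to N"/"back to N" notes track the live register count.)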
x4cl_four_complex_fft MACRO srcreg,srcinc,d1,d2,screg
x4cl_fft_cmn srcreg,srcinc,d1,d2,screg,0,32,64,XMM_SCD,XMM_SCD+32,XMM_SCD+64
bump srcreg, srcinc
ENDM
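;; x4cl_fft_cmn processes two four-complex columns per call: the block at
;; srcreg and the adjacent "nxt" block 32 bytes over, with the first
;; column's sin/cos data at offsets 0/32/64 from screg and the second's
;; at the XMM_SCD offsets. The wrapper then advances srcreg by srcinc.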
x4cl_fft_cmn MACRO srcreg,srcinc,d1,d2,screg,off2,off3,off4,off6,off7,off8
xload xmm0, [srcreg+d2] ;; R3 ;P4 ;Core 2
xload xmm1, [screg+off3+16] ;; cosine/sine
xcopy xmm2, xmm0 ;; Copy R3 ;3456789ABCDEF
mulpd xmm0, xmm1 ;; A3 = R3 * cosine/sine ;1-6 ;1-5
xload xmm3, [srcreg+d2+16] ;; I3 ;456789ABCDEF
mulpd xmm1, xmm3 ;; B3 = I3 * cosine/sine ;3-8 ;2-6
xload xmm4, [srcreg+d1] ;; R2
xload xmm5, [screg+off2+16] ;; cosine/sine
xcopy xmm6, xmm4 ;; Copy R2 ;789ABCDEF
mulpd xmm4, xmm5 ;; A2 = R2 * cosine/sine ;5-10 ;3-7
xload xmm7, [srcreg+d1+16] ;; I2 ;89ABCDEF
mulpd xmm5, xmm7 ;; B2 = I2 * cosine/sine ;7-12 ;4-8
xload xmm8, [srcreg+d2+d1] ;; R4
xload xmm9, [screg+off4+16] ;; cosine/sine
xcopy xmm10, xmm8 ;; Copy R4 ;BCDEF
mulpd xmm8, xmm9 ;; A4 = R4 * cosine/sine ;9-14 ;5-9
subpd xmm0, xmm3 ;; A3 = A3 - I3 ;8-11 ;6-8 ;3BCDEF
xload xmm11, [srcreg+d2+d1+16];; I4 ;3CDEF
mulpd xmm9, xmm11 ;; B4 = I4 * cosine/sine ;11-16 ;6-10
addpd xmm1, xmm2 ;; B3 = B3 + R3 ;10-13 ;7-9 ;23CDEF
xload xmm12, [srcreg+d2+32] ;; nxt R3
xload xmm13, [screg+off7+16] ;; nxt cosine/sine
xcopy xmm14, xmm12 ;; Copy nxt R3 ;23F
mulpd xmm12, xmm13 ;; nxt A3 = R3 * cosine/sine ;13-18 ;7-11
subpd xmm4, xmm7 ;; A2 = A2 - I2 ;12-15 ;8-10 ;237F
xload xmm15, [srcreg+d2+48] ;; nxt I3 ;237
mulpd xmm13, xmm15 ;; nxt B3 = I3 * cosine/sine ;15-20 ;8-12
addpd xmm5, xmm6 ;; B2 = B2 + R2 ;14-17 ;9-11 ;2367
xload xmm2, [screg+off3] ;; sine ;367
mulpd xmm0, xmm2 ;; A3 = A3 * sine (new R3) ;17-22 ;9-13
subpd xmm8, xmm11 ;; A4 = A4 - I4 ;16-19 ;10-12 ;367B
mulpd xmm1, xmm2 ;; B3 = B3 * sine (new I3) ;19-24 ;10-14 ;2367B
xload xmm3, [screg+off2] ;; sine ;267B
addpd xmm9, xmm10 ;; B4 = B4 + R4 ;18-21 ;11-13 ;267AB
mulpd xmm4, xmm3 ;; A2 = A2 * sine (new R2) ;21-26 ;11-15
xload xmm10, [srcreg] ;; R1 ;267B
subpd xmm12, xmm15 ;; nxt A3 = A3 - I3 ;20-23 ;12-14 ;267BF
mulpd xmm5, xmm3 ;; B2 = B2 * sine (new I2) ;23-28 ;12-16 ;2367BF
xload xmm7, [screg+off4] ;; sine ;236BF
xcopy xmm11, xmm10 ;; Copy R1 ;236F
addpd xmm13, xmm14 ;; nxt B3 = B3 + R3 ;22-25 ;13-15 ;236EF
mulpd xmm8, xmm7 ;; A4 = A4 * sine (new R4) ;25-30 ;13-17
xload xmm2, [screg+off7] ;; nxt sine ;36EF
subpd xmm10, xmm0 ;; R1 = R1 - R3 (mid R3) ;24-27 ;14-16
mulpd xmm9, xmm7 ;; B4 = B4 * sine (new I4) ;27-32 ;14-18 ;367EF
xload xmm15, [srcreg+16] ;; I1 ;367E
addpd xmm0, xmm11 ;; R3 = R1 + R3 (mid R1) ;26-29 ;15-17 ;367BE
mulpd xmm12, xmm2 ;; nxt A3 = A3 * sine (new R3) ;29-34 ;15-19
xcopy xmm3, xmm15 ;; Copy I1 ;67BE
subpd xmm15, xmm1 ;; I1 = I1 - I3 (mid I3) ;28-31 ;16-18
mulpd xmm13, xmm2 ;; nxt B3 = B3 * sine (new I3) ;31-36 ;16-20 ;267BE
xcopy xmm14, xmm4 ;; Copy new R2 ;267B
xprefetchw [srcreg+srcinc]
addpd xmm1, xmm3 ;; I3 = I1 + I3 (mid I1) ;30-33 ;17-19 ;2367B
xcopy xmm7, xmm5 ;; Copy new I2 ;236B
xload xmm2, [srcreg+d1+32] ;; nxt R2 ;36B
subpd xmm4, xmm8 ;; R2 = R2 - R4 (mid R4) ;32-35 ;18-20
xload xmm3, [screg+off6+16] ;; nxt cosine/sine ;6B
subpd xmm5, xmm9 ;; I2 = I2 - I4 (mid I4) ;34-37 ;19-21
xcopy xmm6, xmm2 ;; nxt Copy R2 ;B
xprefetchw [srcreg+srcinc+d1]
addpd xmm8, xmm14 ;; R4 = R2 + R4 (mid R2) ;36-39 ;20-22 ;BE
xcopy xmm11, xmm10 ;; Copy mid R3 ;28-33 ; 17 ;E
xload xmm14, [srcreg+d1+48] ;; nxt I2 ;
addpd xmm9, xmm7 ;; I4 = I2 + I4 (mid I2) ;38-41 ;21-23 ;7
xcopy xmm7, xmm15 ;; Copy mid I3 ; 19 ;
subpd xmm10, xmm5 ;; R3 = R3 - I4 (final R3) ;40-43 ;22-24
xstore [srcreg+d1], xmm10 ;; Save R3 ; 25-27 ;A
xload xmm10, [srcreg+d2+d1+32];; nxt R4 ;
addpd xmm5, xmm11 ;; I4 = R3 + I4 (final R4) ;42-45 ;23-25 ;B
mulpd xmm2, xmm3 ;; nxt A2 = R2 * cosine/sine ;45-50 ;23-27
xcopy xmm11, xmm0 ;; Copy mid R1 ; 18 ;
xstore [srcreg+d1+32], xmm5 ;; Save R4 ; 26-28 ;5
xload xmm5, [screg+off8+16] ;; nxt cosine/sine ;
subpd xmm15, xmm4 ;; I3 = I3 - R4 (final I4) ;44-47 ;24-26
mulpd xmm3, xmm14 ;; nxt B2 = I2 * cosine/sine ;47-52 ;24-28
xstore [srcreg+d1+48], xmm15 ;; Save I4 ; 27-29 ;F
xcopy xmm15, xmm10 ;; nxt Copy R4 ;
addpd xmm4, xmm7 ;; R4 = I3 + R4 (final I3) ;46-49 ;25-27 ;7
mulpd xmm10, xmm5 ;; nxt A4 = R4 * cosine/sine ;49-54 ;25-29
xload xmm7, [srcreg+d2+d1+48] ;; nxt I4 ;
subpd xmm0, xmm8 ;; R1 = R1 - R2 (final R2) ;48-51 ;26-28
mulpd xmm5, xmm7 ;; nxt B4 = I4 * cosine/sine ;51-56 ;26-30
addpd xmm8, xmm11 ;; R2 = R1 + R2 (final R1) ;50-53 ;27-29 ;B
xload xmm11, [screg+off6] ;; nxt sine ;
subpd xmm2, xmm14 ;; nxt A2 = A2 - I2 ;52-55 ;28-30 ;E
xcopy xmm14, xmm1 ;; Copy mid I1 ;
xstore [srcreg], xmm8 ;; Save R1 ; 28-30 ;8
xload xmm8, [screg+off8] ;; nxt sine ;
addpd xmm3, xmm6 ;; nxt B2 = B2 + R2 ;54-57 ;29-31 ;6
xload xmm6, [srcreg+32] ;; nxt R1 ;
xstore [srcreg+32], xmm0 ;; Save R2 ; 29-31 ;0
subpd xmm10, xmm7 ;; nxt A4 = A4 - I4 ;56-59 ;30-32 ;07
xcopy xmm0, xmm6 ;; nxt Copy R1 ;7
xstore [srcreg+d1+16], xmm4 ;; Save I3 ; 30-32 ;4
addpd xmm5, xmm15 ;; nxt B4 = B4 + R4 ;58-61 ;31-33 ;4F
mulpd xmm2, xmm11 ;; nxt A2 = A2 * sine (new R2) ;57-62 ;31-35
xload xmm4, [srcreg+48] ;; nxt I1 ;F
subpd xmm1, xmm9 ;; I1 = I1 - I2 (final I2) ;52-55 ;32-34
mulpd xmm3, xmm11 ;; nxt B2 = B2 * sine (new I2) ;59-64 ;32-36 ;BF
xcopy xmm15, xmm4 ;; nxt Copy I1 ;B
addpd xmm9, xmm14 ;; I2 = I1 + I2 (final I1) ;54-57 ;33-35 ;+E
mulpd xmm10, xmm8 ;; nxt A4 = A4 * sine (new R4) ;61-66 ;33-37
xstore [srcreg+48], xmm1 ;; Save I2 ; 35-37 ;+1
subpd xmm6, xmm12 ;; nxt R1 = R1 - R3 (mid R3) ;56-59 ;34-36
mulpd xmm5, xmm8 ;; nxt B4 = B4 * sine (new I4) ;63-68 ;34-38 ;+8
xprefetchw [srcreg+srcinc+d2]
addpd xmm12, xmm0 ;; nxt R3 = R1 + R3 (mid R1) ;58-61 ;35-37 ;+0
subpd xmm4, xmm13 ;; nxt I1 = I1 - I3 (mid I3) ;60-63 ;36-38
addpd xmm13, xmm15 ;; nxt I3 = I1 + I3 (mid I1) ;62-65 ;37-39 ;+F
xstore [srcreg+16], xmm9 ;; Save I1 ; 36-38 ;+9
xcopy xmm11, xmm2 ;; nxt Copy new R2 ;-B
subpd xmm2, xmm10 ;; nxt R2 = R2 - R4 (mid R4) ;64-67 ;38-40
addpd xmm10, xmm11 ;; nxt R4 = R2 + R4 (mid R2) ;66-69 ;39-41 ;+B
xcopy xmm14, xmm3 ;; nxt Copy new I2 ;-E
subpd xmm3, xmm5 ;; nxt I2 = I2 - I4 (mid I4) ;68-71 ;40-42
addpd xmm5, xmm14 ;; nxt I4 = I2 + I4 (mid I2) ;70-73 ;41-43 ;+E
xprefetchw [srcreg+srcinc+d2+d1]
xcopy xmm8, xmm6 ;; nxt Copy mid R3
subpd xmm6, xmm3 ;; nxt R3 = R3 - I4 (final R3) ;72-75 ;42-44
xcopy xmm9, xmm12 ;; nxt Copy mid R1
subpd xmm12, xmm10 ;; nxt R1 = R1 - R2 (final R2) ;74-77 ;43-45
xcopy xmm0, xmm4 ;; nxt Copy mid I3
subpd xmm4, xmm2 ;; nxt I3 = I3 - R4 (final I4) ;76-79 ;44-46
addpd xmm3, xmm8 ;; nxt I4 = R3 + I4 (final R4) ;78-81 ;45-47
addpd xmm10, xmm9 ;; nxt R2 = R1 + R2 (final R1) ;80-83 ;46-48
addpd xmm2, xmm0 ;; nxt R4 = I3 + R4 (final I3) ;82-85 ;47-49
xcopy xmm11, xmm13 ;; nxt Copy mid I1
subpd xmm13, xmm5 ;; nxt I1 = I1 - I2 (final I2) ;84-87 ;48-50
addpd xmm5, xmm11 ;; nxt I2 = I1 + I2 (final I1) ;86-89 ;49-51
xstore [srcreg+d2+d1], xmm6 ;; nxt Save R3
xstore [srcreg+d2+32], xmm12 ;; nxt Save R2
xstore [srcreg+d2+d1+48], xmm4 ;; nxt Save I4
xstore [srcreg+d2+d1+32], xmm3 ;; nxt Save R4
xstore [srcreg+d2], xmm10 ;; nxt Save R1
xstore [srcreg+d2+d1+16], xmm2 ;; nxt Save I3
xstore [srcreg+d2+48], xmm13 ;; nxt Save I2
xstore [srcreg+d2+16], xmm5 ;; nxt Save I1
ENDM
g4cl_four_complex_fft MACRO srcreg,srcinc,d1,d2,dstreg,dstinc,e1,e2
g4cl_fft_cmn srcreg,srcinc,d1,d2,dstreg,dstinc,e1,e2,rdi,0,32,64,XMM_SCD,XMM_SCD+32,XMM_SCD+64
bump srcreg, srcinc
bump dstreg, dstinc
ENDM
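;; g4cl_four_complex_fft is the out-of-place counterpart of
;; x4cl_four_complex_fft: it reads from srcreg (strides d1/d2), writes to
;; dstreg (strides e1/e2), prefetches both streams, and passes rdi as the
;; sin/cos pointer.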
g4cl_fft_cmn MACRO srcreg,srcinc,d1,d2,dstreg,dstinc,e1,e2,screg,off2,off3,off4,off6,off7,off8
xload xmm0, [srcreg+d2] ;; R3 ;P4 ;Core 2
xload xmm1, [screg+off3+16] ;; cosine/sine
xcopy xmm2, xmm0 ;; Copy R3 ;3456789ABCDEF
mulpd xmm0, xmm1 ;; A3 = R3 * cosine/sine ;1-6 ;1-5
xload xmm3, [srcreg+d2+16] ;; I3 ;456789ABCDEF
mulpd xmm1, xmm3 ;; B3 = I3 * cosine/sine ;3-8 ;2-6
xload xmm4, [srcreg+d1] ;; R2
xload xmm5, [screg+off2+16] ;; cosine/sine
xcopy xmm6, xmm4 ;; Copy R2 ;789ABCDEF
mulpd xmm4, xmm5 ;; A2 = R2 * cosine/sine ;5-10 ;3-7
xload xmm7, [srcreg+d1+16] ;; I2 ;89ABCDEF
mulpd xmm5, xmm7 ;; B2 = I2 * cosine/sine ;7-12 ;4-8
xload xmm8, [srcreg+d2+d1] ;; R4
xload xmm9, [screg+off4+16] ;; cosine/sine
xcopy xmm10, xmm8 ;; Copy R4 ;BCDEF
mulpd xmm8, xmm9 ;; A4 = R4 * cosine/sine ;9-14 ;5-9
subpd xmm0, xmm3 ;; A3 = A3 - I3 ;8-11 ;6-8 ;3BCDEF
xload xmm11, [srcreg+d2+d1+16];; I4 ;3CDEF
mulpd xmm9, xmm11 ;; B4 = I4 * cosine/sine ;11-16 ;6-10
addpd xmm1, xmm2 ;; B3 = B3 + R3 ;10-13 ;7-9 ;23CDEF
xload xmm12, [srcreg+d2+32] ;; nxt R3
xload xmm13, [screg+off7+16] ;; nxt cosine/sine
xcopy xmm14, xmm12 ;; Copy nxt R3 ;23F
mulpd xmm12, xmm13 ;; nxt A3 = R3 * cosine/sine ;13-18 ;7-11
subpd xmm4, xmm7 ;; A2 = A2 - I2 ;12-15 ;8-10 ;237F
xload xmm15, [srcreg+d2+48] ;; nxt I3 ;237
mulpd xmm13, xmm15 ;; nxt B3 = I3 * cosine/sine ;15-20 ;8-12
addpd xmm5, xmm6 ;; B2 = B2 + R2 ;14-17 ;9-11 ;2367
xload xmm2, [screg+off3] ;; sine ;367
mulpd xmm0, xmm2 ;; A3 = A3 * sine (new R3) ;17-22 ;9-13
subpd xmm8, xmm11 ;; A4 = A4 - I4 ;16-19 ;10-12 ;367B
mulpd xmm1, xmm2 ;; B3 = B3 * sine (new I3) ;19-24 ;10-14 ;2367B
xload xmm3, [screg+off2] ;; sine ;267B
addpd xmm9, xmm10 ;; B4 = B4 + R4 ;18-21 ;11-13 ;267AB
mulpd xmm4, xmm3 ;; A2 = A2 * sine (new R2) ;21-26 ;11-15
xload xmm10, [srcreg] ;; R1 ;267B
subpd xmm12, xmm15 ;; nxt A3 = A3 - I3 ;20-23 ;12-14 ;267BF
mulpd xmm5, xmm3 ;; B2 = B2 * sine (new I2) ;23-28 ;12-16 ;2367BF
xload xmm7, [screg+off4] ;; sine ;236BF
xcopy xmm11, xmm10 ;; Copy R1 ;236F
addpd xmm13, xmm14 ;; nxt B3 = B3 + R3 ;22-25 ;13-15 ;236EF
mulpd xmm8, xmm7 ;; A4 = A4 * sine (new R4) ;25-30 ;13-17
xload xmm2, [screg+off7] ;; nxt sine ;36EF
subpd xmm10, xmm0 ;; R1 = R1 - R3 (mid R3) ;24-27 ;14-16
mulpd xmm9, xmm7 ;; B4 = B4 * sine (new I4) ;27-32 ;14-18 ;367EF
xload xmm15, [srcreg+16] ;; I1 ;367E
addpd xmm0, xmm11 ;; R3 = R1 + R3 (mid R1) ;26-29 ;15-17 ;367BE
mulpd xmm12, xmm2 ;; nxt A3 = A3 * sine (new R3) ;29-34 ;15-19
xcopy xmm3, xmm15 ;; Copy I1 ;67BE
subpd xmm15, xmm1 ;; I1 = I1 - I3 (mid I3) ;28-31 ;16-18
mulpd xmm13, xmm2 ;; nxt B3 = B3 * sine (new I3) ;31-36 ;16-20 ;267BE
xcopy xmm14, xmm4 ;; Copy new R2 ;267B
xprefetch [srcreg+srcinc]
xprefetchw [dstreg+dstinc]
addpd xmm1, xmm3 ;; I3 = I1 + I3 (mid I1) ;30-33 ;17-19 ;2367B
xcopy xmm7, xmm5 ;; Copy new I2 ;236B
xload xmm2, [srcreg+d1+32] ;; nxt R2 ;36B
subpd xmm4, xmm8 ;; R2 = R2 - R4 (mid R4) ;32-35 ;18-20
xload xmm3, [screg+off6+16] ;; nxt cosine/sine ;6B
subpd xmm5, xmm9 ;; I2 = I2 - I4 (mid I4) ;34-37 ;19-21
xcopy xmm6, xmm2 ;; nxt Copy R2 ;B
xprefetch [srcreg+srcinc+d1]
xprefetchw [dstreg+dstinc+e1]
addpd xmm8, xmm14 ;; R4 = R2 + R4 (mid R2) ;36-39 ;20-22 ;BE
xcopy xmm11, xmm10 ;; Copy mid R3 ;28-33 ; 17 ;E
xload xmm14, [srcreg+d1+48] ;; nxt I2 ;
addpd xmm9, xmm7 ;; I4 = I2 + I4 (mid I2) ;38-41 ;21-23 ;7
xcopy xmm7, xmm15 ;; Copy mid I3 ; 19 ;
subpd xmm10, xmm5 ;; R3 = R3 - I4 (final R3) ;40-43 ;22-24
xstore [dstreg+e1], xmm10 ;; Save R3 ; 25-27 ;A
xload xmm10, [srcreg+d2+d1+32];; nxt R4 ;
addpd xmm5, xmm11 ;; I4 = R3 + I4 (final R4) ;42-45 ;23-25 ;B
mulpd xmm2, xmm3 ;; nxt A2 = R2 * cosine/sine ;45-50 ;23-27
xcopy xmm11, xmm0 ;; Copy mid R1 ; 18 ;
xstore [dstreg+e1+32], xmm5 ;; Save R4 ; 26-28 ;5
xload xmm5, [screg+off8+16] ;; nxt cosine/sine ;
subpd xmm15, xmm4 ;; I3 = I3 - R4 (final I4) ;44-47 ;24-26
mulpd xmm3, xmm14 ;; nxt B2 = I2 * cosine/sine ;47-52 ;24-28
xstore [dstreg+e1+48], xmm15 ;; Save I4 ; 27-29 ;F
xcopy xmm15, xmm10 ;; nxt Copy R4 ;
addpd xmm4, xmm7 ;; R4 = I3 + R4 (final I3) ;46-49 ;25-27 ;7
mulpd xmm10, xmm5 ;; nxt A4 = R4 * cosine/sine ;49-54 ;25-29
xload xmm7, [srcreg+d2+d1+48] ;; nxt I4 ;
subpd xmm0, xmm8 ;; R1 = R1 - R2 (final R2) ;48-51 ;26-28
mulpd xmm5, xmm7 ;; nxt B4 = I4 * cosine/sine ;51-56 ;26-30
addpd xmm8, xmm11 ;; R2 = R1 + R2 (final R1) ;50-53 ;27-29 ;B
xload xmm11, [screg+off6] ;; nxt sine ;
subpd xmm2, xmm14 ;; nxt A2 = A2 - I2 ;52-55 ;28-30 ;E
xcopy xmm14, xmm1 ;; Copy mid I1 ;
xstore [dstreg], xmm8 ;; Save R1 ; 28-30 ;8
xload xmm8, [screg+off8] ;; nxt sine ;
addpd xmm3, xmm6 ;; nxt B2 = B2 + R2 ;54-57 ;29-31 ;6
xload xmm6, [srcreg+32] ;; nxt R1 ;
xstore [dstreg+32], xmm0 ;; Save R2 ; 29-31 ;0
subpd xmm10, xmm7 ;; nxt A4 = A4 - I4 ;56-59 ;30-32 ;07
xcopy xmm0, xmm6 ;; nxt Copy R1 ;7
xstore [dstreg+e1+16], xmm4 ;; Save I3 ; 30-32 ;4
addpd xmm5, xmm15 ;; nxt B4 = B4 + R4 ;58-61 ;31-33 ;4F
mulpd xmm2, xmm11 ;; nxt A2 = A2 * sine (new R2) ;57-62 ;31-35
xload xmm4, [srcreg+48] ;; nxt I1 ;F
subpd xmm1, xmm9 ;; I1 = I1 - I2 (final I2) ;52-55 ;32-34
mulpd xmm3, xmm11 ;; nxt B2 = B2 * sine (new I2) ;59-64 ;32-36 ;BF
xcopy xmm15, xmm4 ;; nxt Copy I1 ;B
addpd xmm9, xmm14 ;; I2 = I1 + I2 (final I1) ;54-57 ;33-35 ;+E
mulpd xmm10, xmm8 ;; nxt A4 = A4 * sine (new R4) ;61-66 ;33-37
xstore [dstreg+48], xmm1 ;; Save I2 ; 35-37 ;+1
subpd xmm6, xmm12 ;; nxt R1 = R1 - R3 (mid R3) ;56-59 ;34-36
mulpd xmm5, xmm8 ;; nxt B4 = B4 * sine (new I4) ;63-68 ;34-38 ;+8
xprefetch [srcreg+srcinc+d2]
xprefetchw [dstreg+dstinc+e2]
addpd xmm12, xmm0 ;; nxt R3 = R1 + R3 (mid R1) ;58-61 ;35-37 ;+0
subpd xmm4, xmm13 ;; nxt I1 = I1 - I3 (mid I3) ;60-63 ;36-38
addpd xmm13, xmm15 ;; nxt I3 = I1 + I3 (mid I1) ;62-65 ;37-39 ;+F
xstore [dstreg+16], xmm9 ;; Save I1 ; 36-38 ;+9
xcopy xmm11, xmm2 ;; nxt Copy new R2 ;-B
subpd xmm2, xmm10 ;; nxt R2 = R2 - R4 (mid R4) ;64-67 ;38-40
addpd xmm10, xmm11 ;; nxt R4 = R2 + R4 (mid R2) ;66-69 ;39-41 ;+B
xcopy xmm14, xmm3 ;; nxt Copy new I2 ;-E
subpd xmm3, xmm5 ;; nxt I2 = I2 - I4 (mid I4) ;68-71 ;40-42
addpd xmm5, xmm14 ;; nxt I4 = I2 + I4 (mid I2) ;70-73 ;41-43 ;+E
xprefetch [srcreg+srcinc+d2+d1]
xprefetchw [dstreg+dstinc+e2+e1]
xcopy xmm8, xmm6 ;; nxt Copy mid R3
subpd xmm6, xmm3 ;; nxt R3 = R3 - I4 (final R3) ;72-75 ;42-44
xcopy xmm9, xmm12 ;; nxt Copy mid R1
subpd xmm12, xmm10 ;; nxt R1 = R1 - R2 (final R2) ;74-77 ;43-45
xcopy xmm0, xmm4 ;; nxt Copy mid I3
subpd xmm4, xmm2 ;; nxt I3 = I3 - R4 (final I4) ;76-79 ;44-46
addpd xmm3, xmm8 ;; nxt I4 = R3 + I4 (final R4) ;78-81 ;45-47
addpd xmm10, xmm9 ;; nxt R2 = R1 + R2 (final R1) ;80-83 ;46-48
addpd xmm2, xmm0 ;; nxt R4 = I3 + R4 (final I3) ;82-85 ;47-49
xcopy xmm11, xmm13 ;; nxt Copy mid I1
subpd xmm13, xmm5 ;; nxt I1 = I1 - I2 (final I2) ;84-87 ;48-50
addpd xmm5, xmm11 ;; nxt I2 = I1 + I2 (final I1) ;86-89 ;49-51
xstore [dstreg+e2+e1], xmm6 ;; nxt Save R3
xstore [dstreg+e2+32], xmm12 ;; nxt Save R2
xstore [dstreg+e2+e1+48], xmm4 ;; nxt Save I4
xstore [dstreg+e2+e1+32], xmm3 ;; nxt Save R4
xstore [dstreg+e2], xmm10 ;; nxt Save R1
xstore [dstreg+e2+e1+16], xmm2 ;; nxt Save I3
xstore [dstreg+e2+48], xmm13 ;; nxt Save I2
xstore [dstreg+e2+16], xmm5 ;; nxt Save I1
ENDM
;; Cheat sheet for scheduling dependency chains (and num registers required)
;; 12345678901234567890123456789012345678901234567890123456789012345678901234567890
;;r24(i2) AAA
;;r24(i1) AAA
;;r57(i4) AAA
;;r57(r3) AAA
;;r13(r2) AAA
;;r13(r1) AAA
;;r68(r4) AAA
;;r68(i3) AAA
;;
;;mI4(depI2I4) AAA
;;mR4(depR2R4) AAA
;;mI2(depI2I4) AAA
;;mR2(depR2R4) AAA
;;mR3(depR1R3) AAA
;;mI3(depI1I3) AAA
;;mR1(depR1R3) AAA
;;mI1(depI1I3) AAA 8 reg
;;B4 MMMMMAAAMMMMM
;;B2 MMMMMAAAMMMMM
;;A4 MMMMMAAAMMMMM
;;A2 MMMMMAAAMMMMM
;;B3 MMMMMAAAMMMMM
;;A3 MMMMMAAAMMMMM to 14 and back to 8
;; 8 storeable!
;;nxt r24(i2) AAA
;;nxt r24(i1) AAA
;;nxt r57(i4) AAA
;;nxt r57(r3) AAA to 6
;;nxt r13(r2) AAA
;;nxt r13(r1) AAA to 15 to 14
;;nxt r68(r4) AAA
;;nxt r68(i3) AAA store 1 = 13, to 16 to 15
;;nxt mI4(depI2I4) AAA to 16
;;nxt mR4(depR2R4) AAA store 1 = 15, to 16
;;nxt mI2(depI2I4) AAA to 15
;;nxt mR2(depR2R4) AAA to 14
;;nxt mR3(depR1R3) AAA to 15
;;nxt mI3(depI1I3) AAA to 16
;;nxt mR1(depR1R3) AAA
;;nxt mI1(depI1I3) AAA to 12
;;nxt B4 MMMMMAAAMMMMM
;;nxt A4 MMMMMAAAMMMMM
;;nxt B2 MMMMMAAAMMMMM registers get messy here
;;nxt A2 MMMMMAAAMMMMM
;;nxt B3 MMMMMAAAMMMMM
;;nxt A3 MMMMMAAAMMMMM
x4cl_four_complex_unfft MACRO srcreg,srcinc,d1,d2,screg
x4cl_unfft_cmn srcreg,srcinc,d1,d2,screg,0,32,64,0,32,64
bump srcreg, srcinc
ENDM
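;; The inverse butterfly below runs the forward dataflow in reverse: the
;; add/subtract combine comes first, operating straight from memory, and
;; the sin/cos (pre_real/pre_imag) multiplies are applied on the way out.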
;; Core 2 actually comes in at 54 clocks
x4cl_unfft_cmn MACRO srcreg,srcinc,d1,d2,screg,off2,off3,off4,off6,off7,off8
xload xmm1, [srcreg+32] ;; mem2 (I1) ;P4 ;Core2
xload xmm0, [srcreg+d1+32] ;; mem4 (I2)
xcopy xmm2, xmm1 ;; Copy I1 ;3456789ABCDEF
subpd xmm1, xmm0 ;; new I2 = I1 - I2 ;1-4 ;1-3
xload xmm6, [srcreg+d2+d1] ;; mem7 (R4)
xload xmm7, [srcreg+d2] ;; mem5 (R3)
xcopy xmm8, xmm6 ;; Copy R4 ;3459ABCDEF
subpd xmm6, xmm7 ;; new I4 = R4 - R3 ;3-6 ;2-4
addpd xmm0, xmm2 ;; new I1 = I1 + I2 ;5-8 ;3-5 ;23459ABCDEF
xload xmm12, [srcreg] ;; mem1 (R1)
xload xmm11, [srcreg+d1] ;; mem3 (R2)
xcopy xmm13, xmm12 ;; Copy R1 ;23459AEF
subpd xmm12, xmm11 ;; new R2 = R1 - R2 ;7-10 ;4-6
xcopy xmm10, xmm1 ;; Copy new I2 ;5-10 ; 4 ;23459EF
subpd xmm1, xmm6 ;; I2 = I2 - I4 (mid I4) ;9-12 ;5-7
xload xmm4, [srcreg+d2+32] ;; mem6 (I3)
xload xmm3, [srcreg+d2+d1+32] ;; mem8 (I4)
xcopy xmm5, xmm4 ;; Copy I3 ;29EF
subpd xmm4, xmm3 ;; new R4 = I3 - I4 ;11-14 ;6-8
xcopy xmm9, xmm0 ;; Copy new I1 ;9-14 ; 6 ;2EF
addpd xmm6, xmm10 ;; I4 = I2 + I4 (mid I2) ;13-16 ;7-9 ;2AEF
xcopy xmm2, xmm12 ;; Copy new R2 ;11-16 ; 7 ;AEF
xprefetchw [srcreg+srcinc]
addpd xmm7, xmm8 ;; new R3 = R3 + R4 ;15-18 ;8-10 ;8AEF
xload xmm8, [screg+off4+16] ;; B4 = pre_real/pre_imag ;AEF
addpd xmm11, xmm13 ;; new R1 = R1 + R2 ;17-20 ;9-11 ;ADEF
xcopy xmm15, xmm8 ;; A4 = pre_real/pre_imag ;ADE
addpd xmm3, xmm5 ;; new I3 = I3 + I4 ;19-22 ;10-12 ;5ADE
xload xmm10, [screg+off2+16] ;; B2 = pre_real/pre_imag ;5DE
subpd xmm12, xmm4 ;; R4 = R2 - R4 (mid R4) ;21-24 ;11-13