pyspark-session-2018-10-09.txt
mparsian@Mahmouds-MacBook ~/spark-2.3.0 $ ./zbin/zenv_setup.sh
mparsian@Mahmouds-MacBook ~/spark-2.3.0 $ ./bin/pyspark
Python 2.7.10 (default, Oct 6 2017, 22:29:07)
[GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.31)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
18/10/09 18:04:39 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/ '_/
   /__ / .__/\_,_/_/ /_/\_\   version 2.3.0
      /_/
Using Python version 2.7.10 (default, Oct 6 2017 22:29:07)
SparkSession available as 'spark'.
>>>
>>>
>>> spark
<pyspark.sql.session.SparkSession object at 0x1025f9c50>
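# Aside (not typed in the session): SparkSession, introduced in Spark 2.0,
# wraps the older SparkContext; RDD methods such as parallelize() live on
# the context, which is reachable as:
sc = spark.sparkContext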
>>>
>>> data = [1, -3, 4, 2, -5, 2]
>>> data
[1, -3, 4, 2, -5, 2]
>>> rdd = spark.sparkContext.parallalize(data)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'SparkContext' object has no attribute 'parallalize'
>>> rdd = spark.sparkContext.parallelize(data)
>>> rdd.count()
6
>>> rdd.collect()
[1, -3, 4, 2, -5, 2]
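# Sketch (not from the recorded session): parallelize() also accepts an
# optional numSlices argument controlling how many partitions the RDD gets.
rdd_3parts = spark.sparkContext.parallelize(data, 3)
print(rdd_3parts.getNumPartitions())   # 3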
>>>
>>> def myfun(n):
... mylist = []
... if n > 0:
... mylist.append(100)
... mylist.append(200)
... else:
... mylist.append(0)
... #
... return mylist
...
>>>
>>> x = myfun(3)
>>> x
[100, 200]
>>> y = myfun(-55)
>>> y
[0]
>>>
>>> rdd2 = rdd.flatMap(myfun)
>>> rdd.collect()
[1, -3, 4, 2, -5, 2]
>>> rdd2.collect()
[100, 200, 0, 100, 200, 100, 200, 0, 100, 200]
>>> rdd2.count()
10
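# Sketch (not from the recorded session): flatMap() flattens the list that
# myfun returns for each element; a plain map() would keep the lists nested.
nested = rdd.map(myfun)
print(nested.collect())
# [[100, 200], [0], [100, 200], [100, 200], [0], [100, 200]]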
>>>
>>> rdd3 = rdd2.filter(lambda x : x > 100)
>>> rdd3.collect()
[200, 200, 200, 200]
>>>
>>> rdd4 = rdd2.filter(lambda x : x > 10)
>>> rdd4.collect()
[100, 200, 100, 200, 100, 200, 100, 200]
>>>
>>>
>>> def keep100(n):
... if n > 100:
... return True
... else:
... return False
...
>>>
>>> rdd5 = rdd2.filter(keep100)
>>> rdd5.collect()
[200, 200, 200, 200]
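# Sketch (not from the recorded session): since the comparison already
# yields a boolean, keep100 can be written in one line.
def keep100_short(n):
    return n > 100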
>>>
>>>
>>> rdd2.collect()
[100, 200, 0, 100, 200, 100, 200, 0, 100, 200]
>>> rdd6 = rdd.map(lambda x : x+1000)
>>> rdd6.collect()
[1001, 997, 1004, 1002, 995, 1002]
>>>
>>> def myadder(n):
... if n > 0:
... return n+1000
... else:
... return n
...
>>>
>>> rdd2.collect()
[100, 200, 0, 100, 200, 100, 200, 0, 100, 200]
>>> rdd7 = rdd2.map(myadder)
>>> rdd7.collect()
[1100, 1200, 0, 1100, 1200, 1100, 1200, 0, 1100, 1200]
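# Sketch (not from the recorded session): myadder can also be expressed
# inline with a conditional expression.
rdd7b = rdd2.map(lambda n: n + 1000 if n > 0 else n)
print(rdd7b.collect())
# [1100, 1200, 0, 1100, 1200, 1100, 1200, 0, 1100, 1200]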
>>>
>>> rdd2.collect()
[100, 200, 0, 100, 200, 100, 200, 0, 100, 200]
>>> mysum = rdd2.reduce(lambda x,y: x+y)
>>> mysum
1200
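# Sketch (not from the recorded session): the function passed to reduce()
# must be commutative and associative, because partitions are combined in
# no fixed order. For a plain total, the built-in sum() action also works.
print(rdd2.sum())   # 1200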
>>>
>>> pairs = [("a", 2), ("b", 3), ("a", 3), ("b", 4), ("a", 7), ("b", 10), ("c", 7), ("c", 1)]
>>>
>>> pairs
[('a', 2), ('b', 3), ('a', 3), ('b', 4), ('a', 7), ('b', 10), ('c', 7), ('c', 1)]
>>>
>>> pairs_rdd = spark.sparkContext.parallelize(pairs)
>>> pairs_rdd.count()
8
>>> pairs_rdd.collect()
[('a', 2), ('b', 3), ('a', 3), ('b', 4), ('a', 7), ('b', 10), ('c', 7), ('c', 1)]
>>>
>>>
>>> grouped = pairs_rdd.groupByKey()
>>> grouped.collect()
[('a', <pyspark.resultiterable.ResultIterable object at 0x102747790>), ('c', <pyspark.resultiterable.ResultIterable object at 0x1027476d0>), ('b', <pyspark.resultiterable.ResultIterable object at 0x10271bf10>)]
>>> grouped.mapValues(lambda it: list(it)).collect()
[('a', [2, 3, 7]), ('c', [7, 1]), ('b', [3, 4, 10])]
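# Sketch (not from the recorded session): for per-key aggregation,
# reduceByKey() is usually preferred over groupByKey(), since it combines
# values within each partition before shuffling.
sums = pairs_rdd.reduceByKey(lambda x, y: x + y)
print(sums.collect())   # e.g. [('a', 12), ('c', 8), ('b', 17)] (order may vary)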
>>>
>>> incby100 = pairs_rdd.mapValues(lambda x : x+100)
>>> incby100.collect()
[('a', 102), ('b', 103), ('a', 103), ('b', 104), ('a', 107), ('b', 110), ('c', 107), ('c', 101)]
>>> incby1000 = pairs_rdd.map(lambda (k,v) : (k, v+1000))
>>> incby1000.collect()
[('a', 1002), ('b', 1003), ('a', 1003), ('b', 1004), ('a', 1007), ('b', 1010), ('c', 1007), ('c', 1001)]
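# Note (not typed in the session): tuple unpacking in lambda parameters,
# as in lambda (k,v), works only in Python 2 (it was removed in Python 3
# by PEP 3113). A version that runs in both indexes into the pair:
incby1000b = pairs_rdd.map(lambda kv: (kv[0], kv[1] + 1000))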
>>>
>>>
>>> grouped.collect()
[('a', <pyspark.resultiterable.ResultIterable object at 0x10273dbd0>), ('c', <pyspark.resultiterable.ResultIterable object at 0x10273d4d0>), ('b', <pyspark.resultiterable.ResultIterable object at 0x10273d950>)]
>>>
>>> average = grouped.mapValues(lambda it: sum(it)/len(it))
>>> average.collect()
[('a', 4), ('c', 4), ('b', 5)]
>>> average = grouped.mapValues(lambda it: float(sum(it))/float(len(it)))
>>> average.collect()
[('a', 4.0), ('c', 4.0), ('b', 5.666666666666667)]
>>>
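# Sketch (not from the recorded session): the same averages can be computed
# without groupByKey() by tracking (sum, count) pairs with aggregateByKey().
sum_count = pairs_rdd.aggregateByKey(
    (0, 0),
    lambda acc, v: (acc[0] + v, acc[1] + 1),   # fold one value into (sum, count)
    lambda a, b: (a[0] + b[0], a[1] + b[1]))   # merge two (sum, count) pairs
avg = sum_count.mapValues(lambda sc: float(sc[0]) / sc[1])
print(avg.collect())   # e.g. [('a', 4.0), ('c', 4.0), ('b', 5.666666666666667)]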