@inproceedings{crume:pdsw12,
Abstract = {In Hadoop mappers send data to reducers in the form of key/value pairs. The default design of Hadoop's process for transmitting this intermediate data can cause a very high overhead, especially for scientific data containing multiple variables in a multi-dimensional space. For example, for a 3D scalar field of a variable ``windspeed1'' the size of keys was 6.75 times the size of values. Much of the disk and network bandwidth of ``shuffling'' this intermediate data is consumed by repeatedly transmitting the variable name for each value. This significant waste of resources is due to an assumption fundamental to Hadoop's design that all key/values are independent. This assumption is inadequate for scientific data which is often organized in regular grids, a structure that can be described in small, constant size.
Earlier we presented SciHadoop, a slightly modified version of Hadoop designed for processing scientific data. We reported on experiments with SciHadoop which confirm that the size of intermediate data has a significant impact on overall performance. Here we show preliminary designs of multiple lossless approaches to compressing intermediate data, one of which results in up to five orders of magnitude reduction of the original key/value ratio.},
Address = {Salt Lake City, UT},
Author = {Adam Crume and Joe Buck and Carlos Maltzahn and Scott Brandt},
Booktitle = {PDSW'12},
Date-Added = {2012-11-02 06:02:29 +0000},
Date-Modified = {2020-01-05 06:29:22 -0700},
Keywords = {papers, mapreduce, compression, array},
Month = {November 12},
Title = {Compressing Intermediate Keys between Mappers and Reducers in SciHadoop},
Year = {2012},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxASQy9jcnVtZS1wZHN3MTIucGRmTxEBYAAAAAABYAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////EGNydW1lLXBkc3cxMi5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFDAAACADgvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOkM6Y3J1bWUtcGRzdzEyLnBkZgAOACIAEABjAHIAdQBtAGUALQBwAGQAcwB3ADEAMgAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAIy9NeSBEcml2ZS9QYXBlcnMvQy9jcnVtZS1wZHN3MTIucGRmAAATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA5AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAZ0=},
Bdsk-File-2 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAZQy9jcnVtZS1wZHN3MTItc2xpZGVzLnBkZk8RAXwAAAAAAXwAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////xdjcnVtZS1wZHN3MTItc2xpZGVzLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABQwAAAgA/LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpDOmNydW1lLXBkc3cxMi1zbGlkZXMucGRmAAAOADAAFwBjAHIAdQBtAGUALQBwAGQAcwB3ADEAMgAtAHMAbABpAGQAZQBzAC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAqL015IERyaXZlL1BhcGVycy9DL2NydW1lLXBkc3cxMi1zbGlkZXMucGRmABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkAEAAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABwA==}}
@inproceedings{he:pdsw12,
Abstract = {Checkpointing is the predominant storage driver in today's petascale supercomputers and is expected to remain as such in tomorrow's exascale supercomputers. Users typically prefer to checkpoint into a shared file, yet parallel file systems often perform poorly for shared file writing. A powerful technique to address this problem is to transparently transform shared file writing into many exclusively written files, as is done in ADIOS and PLFS. Unfortunately, the metadata to reconstruct the fragments into the original file grows with the number of writers. As such, the current approach cannot scale to exaflop supercomputers due to the large overhead of creating and reassembling the metadata.
In this paper, we develop and evaluate algorithms by which patterns in the PLFS metadata can be discovered and then used to replace the current metadata. Our evaluation shows that these patterns reduce the size of the metadata by several orders of magnitude, increase the performance of writes by up to 40 percent, and the performance of reads by up to 480 percent. This contribution therefore can allow current checkpointing models to survive the transition from peta- to exascale.},
Address = {Salt Lake City, UT},
Author = {Jun He and John Bent and Aaron Torres and Gary Grider and Garth Gibson and Carlos Maltzahn and Xian-He Sun},
Booktitle = {PDSW'12},
Date-Added = {2012-11-02 06:00:38 +0000},
Date-Modified = {2020-01-05 05:28:43 -0700},
Keywords = {papers, compression, indexing, plfs, patterndetection, checkpointing},
Month = {November 12},
Read = {1},
Title = {Discovering Structure in Unstructured I/O},
Year = {2012},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAPSC9oZS1wZHN3MTIucGRmTxEBVAAAAAABVAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////DWhlLXBkc3cxMi5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFIAAACADUvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOkg6aGUtcGRzdzEyLnBkZgAADgAcAA0AaABlAC0AcABkAHMAdwAxADIALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACAvTXkgRHJpdmUvUGFwZXJzL0gvaGUtcGRzdzEyLnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA2AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAY4=},
Bdsk-File-2 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAWSC9oZS1wZHN3MTItc2xpZGVzLnBkZk8RAXAAAAAAAXAAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////xRoZS1wZHN3MTItc2xpZGVzLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABSAAAAgA8LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpIOmhlLXBkc3cxMi1zbGlkZXMucGRmAA4AKgAUAGgAZQAtAHAAZABzAHcAMQAyAC0AcwBsAGkAZABlAHMALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACcvTXkgRHJpdmUvUGFwZXJzL0gvaGUtcGRzdzEyLXNsaWRlcy5wZGYAABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkAD0AAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABsQ==}}
@techreport{watkins:soetr12,
Abstract = {Cloud-based services have become an attractive alternative to in-house data centers because of their flexible, on-demand availability of compute and storage resources. This is also true for scientific high-performance computing (HPC) applications that are currently being run on expensive, dedicated hardware. One important challenge of HPC applications is their need to perform periodic global checkpoints of execution state to stable storage in order to recover from failures, but the checkpoint process can dominate the total run-time of HPC applications even in the failure-free case! In HPC architectures, dedicated stable storage is highly tuned for this type of workload using locality and physical layout policies, which are generally unknown in typical cloud environments. In this paper we introduce DataMods, an extended version of the Ceph file system and associated distributed object store RADOS, which are widely used in open source cloud stacks. DataMods extends object-based storage with extended services that take advantage of common cloud data center node hardware configurations (i.e. CPU and local storage resources), and that can be used to construct efficient, scalable middleware services that span the entire storage stack and utilize asynchronous services for offline data management.},
Address = {Santa Cruz, CA},
Author = {Noah Watkins and Carlos Maltzahn and Scott A. Brandt and Adam Manzanares},
Date-Added = {2012-07-21 11:39:45 +0000},
Date-Modified = {2020-01-05 05:29:20 -0700},
Institution = {University of California Santa Cruz},
Keywords = {papers, filesystems, programming, datamanagement},
Month = {July},
Number = {UCSC-SOE-12-07},
Title = {DataMods: Programmable File System Services},
Type = {Technical Report},
Year = {2012},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAVVy93YXRraW5zLXNvZXRyMTIucGRmTxEBbAAAAAABbAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////E3dhdGtpbnMtc29ldHIxMi5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFXAAACADsvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOlc6d2F0a2lucy1zb2V0cjEyLnBkZgAADgAoABMAdwBhAHQAawBpAG4AcwAtAHMAbwBlAHQAcgAxADIALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACYvTXkgRHJpdmUvUGFwZXJzL1cvd2F0a2lucy1zb2V0cjEyLnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA8AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAaw=}}
@inproceedings{bhagwan:spe12,
Abstract = {In healthcare, de-identification is fast becoming a service that is indispensable when medical data needs to be used for research and secondary use purposes. Currently, this process is done either manually, by human agent, or by an automated software algorithm. Both approaches have shortcomings. Here, we introduce a framework for enhancing the outcome of the current modes of executing a de-identification service. This paper presents the steps taken in conceiving and building a privacy framework and tool that improves the service of de-identification. Further, we test the usefulness and applicability of this system through a study with HIPAA-trained experts.},
Address = {Honolulu, HI},
Author = {Varun Bhagwan and Tyrone Grandison and Carlos Maltzahn},
Booktitle = {IEEE 2012 Services Workshop on Security and Privacy Engineering (SPE2012)},
Date-Added = {2012-05-22 03:42:44 +0000},
Date-Modified = {2020-01-05 05:29:59 -0700},
Keywords = {papers, privacy, humancomputation, healthcare},
Month = {June},
Title = {Recommendation-based De-Identification: A Practical Systems Approach towards De-identification of Unstructured Text in Healthcare},
Year = {2012},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxATQi9iaGFnd2FuLXNwZTEyLnBkZk8RAWQAAAAAAWQAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////xFiaGFnd2FuLXNwZTEyLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABQgAAAgA5LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpCOmJoYWd3YW4tc3BlMTIucGRmAAAOACQAEQBiAGgAYQBnAHcAYQBuAC0AcwBwAGUAMQAyAC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAkL015IERyaXZlL1BhcGVycy9CL2JoYWd3YW4tc3BlMTIucGRmABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADoAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABog==}}
@inproceedings{kato:usenix12,
Abstract = {Graphics processing units (GPUs) have become a very powerful platform embracing a concept of heterogeneous many-core computing. However, application domains of GPUs are currently limited to specific systems, largely due to a lack of ``first-class'' GPU resource management for general-purpose multi-tasking systems.
We present Gdev, a new ecosystem of GPU resource management in the operating system (OS). It allows the user space as well as the OS itself to use GPUs as first-class computing resources. Specifically, Gdev's virtual memory manager supports data swapping for excessive memory resource demands, and also provides a shared device memory functionality that allows GPU contexts to communicate with other contexts. Gdev further provides a GPU scheduling scheme to virtualize a physical GPU into multiple logical GPUs, enhancing isolation among working sets of multi-tasking systems.
Our evaluation conducted on Linux and the NVIDIA GPU shows that the basic performance of our prototype implementation is reliable even compared to proprietary software. Further detailed experiments demonstrate that Gdev achieves a 2x speedup for an encrypted file system using the GPU in the OS. Gdev can also improve the makespan of dataflow programs by up to 49% exploiting shared device memory, while an error in the utilization of virtualized GPUs can be limited within only 7%.},
Address = {Boston, MA},
Author = {Shinpei Kato and Michael McThrow and Carlos Maltzahn and Scott A. Brandt},
Booktitle = {USENIX ATC '12},
Date-Added = {2012-04-06 22:55:09 +0000},
Date-Modified = {2020-01-05 05:30:40 -0700},
Keywords = {papers, gpgpu, kernel, linux, scheduling},
Title = {Gdev: First-Class GPU Resource Management in the Operating System},
Year = {2012},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxATSy9rYXRvLXVzZW5peDEyLnBkZk8RAWQAAAAAAWQAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////xFrYXRvLXVzZW5peDEyLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABSwAAAgA5LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpLOmthdG8tdXNlbml4MTIucGRmAAAOACQAEQBrAGEAdABvAC0AdQBzAGUAbgBpAHgAMQAyAC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAkL015IERyaXZlL1BhcGVycy9LL2thdG8tdXNlbml4MTIucGRmABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADoAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABog==}}
@inproceedings{liu:msst12,
Abstract = {The largest-scale high-performance (HPC) systems are stretching parallel file systems to their limits in terms of aggregate bandwidth and numbers of clients. To further sustain the scalability of these file systems, researchers and HPC storage architects are exploring various storage system designs. One proposed storage system design integrates a tier of solid-state burst buffers into the storage system to absorb application I/O requests. In this paper, we simulate and explore this storage system design for use by large-scale HPC systems. First, we examine application I/O patterns on an existing large-scale HPC system to identify common burst patterns. Next, we describe enhancements to the CODES storage system simulator to enable our burst buffer simulations. These enhancements include the integration of a burst buffer model into the I/O forwarding layer of the simulator, the development of an I/O kernel description language and interpreter, the development of a suite of I/O kernels that are derived from observed I/O patterns, and fidelity improvements to the CODES models. We evaluate the I/O performance for a set of multiapplication I/O workloads and burst buffer configurations. We show that burst buffers can accelerate the application perceived throughput to the external storage system and can reduce the amount of external storage bandwidth required to meet a desired application perceived throughput goal.},
Address = {Pacific Grove, CA},
Author = {Ning Liu and Jason Cope and Philip Carns and Christopher Carothers and Robert Ross and Gary Grider and Adam Crume and Carlos Maltzahn},
Booktitle = {MSST/SNAPI 2012},
Date-Added = {2012-03-14 14:37:23 +0000},
Date-Modified = {2020-01-05 05:31:12 -0700},
Keywords = {papers, burstbuffer, simulation, hpc, distributed},
Month = {April 16 - 20},
Title = {On the Role of Burst Buffers in Leadership-class Storage Systems},
Year = {2012},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAQTC9saXUtbXNzdDEyLnBkZk8RAVgAAAAAAVgAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////w5saXUtbXNzdDEyLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABTAAAAgA2LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpMOmxpdS1tc3N0MTIucGRmAA4AHgAOAGwAaQB1AC0AbQBzAHMAdAAxADIALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACEvTXkgRHJpdmUvUGFwZXJzL0wvbGl1LW1zc3QxMi5wZGYAABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADcAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABkw==},
Bdsk-Url-1 = {http://www.mcs.anl.gov/uploads/cels/papers/P2070-0312.pdf}}
@article{ames:peds12,
Abstract = {File system metadata management has become a bottleneck for many data-intensive applications that rely on high-performance file systems. Part of the bottleneck is due to the limitations of an almost 50-year-old interface standard with metadata abstractions that were designed at a time when high-end file systems managed less than 100MB. Today's high-performance file systems store 7--9 orders of magnitude more data, resulting in a number of data items for which these metadata abstractions are inadequate, such as directory hierarchies unable to handle complex relationships among data. Users of file systems have attempted to work around these inadequacies by moving application-specific metadata management to relational databases to make metadata searchable. Splitting file system metadata management into two separate systems introduces inefficiencies and systems management problems. To address this problem, we propose QMDS: a file system metadata management service that integrates all file system metadata and uses a graph data model with attributes on nodes and edges. Our service uses a query language interface for file identification and attribute retrieval. We present our metadata management service design and architecture and study its performance using a text analysis benchmark application. Results from our QMDS prototype show the effectiveness of this approach. Compared to the use of a file system and relational database, the QMDS prototype shows superior performance for both ingest and query workloads.},
Author = {Sasha Ames and Maya Gokhale and Carlos Maltzahn},
Date-Added = {2012-02-27 18:02:43 +0000},
Date-Modified = {2020-01-05 05:32:03 -0700},
Journal = {International Journal of Parallel, Emergent and Distributed Systems},
Keywords = {papers, metadata, management, graphs, filesystems, datamanagement},
Number = {2},
Title = {QMDS: a file system metadata management service supporting a graph data model-based query language},
Volume = {27},
Year = {2012},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxARQS9hbWVzLXBlZHMxMi5wZGZPEQFcAAAAAAFcAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8PYW1lcy1wZWRzMTIucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAUEAAAIANy86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6QTphbWVzLXBlZHMxMi5wZGYAAA4AIAAPAGEAbQBlAHMALQBwAGUAZABzADEAMgAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAIi9NeSBEcml2ZS9QYXBlcnMvQS9hbWVzLXBlZHMxMi5wZGYAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOAAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGY}}
@inproceedings{liu:ppam11,
Abstract = {Exascale supercomputers will have the potential for billion-way parallelism. While physical implementations of these systems are currently not available, HPC system designers can develop models of exascale systems to evaluate system design points. Modeling these systems and associated subsystems is a significant challenge. In this paper, we present the Co-design of Exascale Storage System (CODES) framework for evaluating exascale storage system design points. As part of our early work with CODES, we discuss the use of the CODES framework to simulate leadership-scale storage systems in a tractable amount of time using parallel discrete-event simulation. We describe the current storage system models and protocols included with the CODES framework and demonstrate the use of CODES through simulations of an existing petascale storage system.
},
Address = {Torun, Poland},
Author = {Ning Liu and Christopher Carothers and Jason Cope and Philip Carns and Robert Ross and Adam Crume and Carlos Maltzahn},
Booktitle = {PPAM 2011},
Date-Added = {2012-01-17 01:13:05 +0000},
Date-Modified = {2020-01-05 05:32:41 -0700},
Keywords = {papers, simulation, exascale, storage, systems, parallel, filesystems, hpc},
Month = {September 11-14},
Title = {Modeling a Leadership-scale Storage System},
Year = {2011},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAQTC9saXUtcHBhbTExLnBkZk8RAVgAAAAAAVgAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////w5saXUtcHBhbTExLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABTAAAAgA2LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpMOmxpdS1wcGFtMTEucGRmAA4AHgAOAGwAaQB1AC0AcABwAGEAbQAxADEALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACEvTXkgRHJpdmUvUGFwZXJzL0wvbGl1LXBwYW0xMS5wZGYAABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADcAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABkw==}}
@inproceedings{buck:sc11,
Abstract = {Hadoop has become the de facto platform for large-scale data analysis in commercial applications, and increasingly so in scientific applications. However, Hadoop's byte stream data model causes inefficiencies when used to process scientific data that is commonly stored in highly-structured, array-based binary file formats resulting in limited scalability of Hadoop applications in science. We introduce SciHadoop, a Hadoop plugin allowing scientists to specify logical queries over array-based data models. SciHadoop executes queries as map/reduce programs defined over the logical data model. We describe the implementation of a SciHadoop prototype for NetCDF data sets and quantify the performance of five separate optimizations that address the following goals for several representative aggregate queries: reduce total data transfers, reduce remote reads, and reduce unnecessary reads. Two optimizations allow holistic aggregate queries to be evaluated opportunistically during the map phase; two additional optimizations intelligently partition input data to increase read locality, and one optimization avoids block scans by examining the data dependencies of an executing query to prune input partitions. Experiments involving a holistic function show run-time improvements of up to 8x, with drastic reductions of IO, both locally and over the network.},
Address = {Seattle, WA},
Author = {Joe Buck and Noah Watkins and Jeff LeFevre and Kleoni Ioannidou and Carlos Maltzahn and Neoklis Polyzotis and Scott A. Brandt},
Booktitle = {SC '11},
Date-Added = {2011-08-02 22:58:10 +0000},
Date-Modified = {2020-01-05 05:34:48 -0700},
Keywords = {papers, mapreduce, datamanagement, hpc, structured, netcdf},
Month = {November},
Read = {1},
Title = {SciHadoop: Array-based Query Processing in Hadoop},
Year = {2011},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAPQi9idWNrLXNjMTEucGRmTxEBVAAAAAABVAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////DWJ1Y2stc2MxMS5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFCAAACADUvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOkI6YnVjay1zYzExLnBkZgAADgAcAA0AYgB1AGMAawAtAHMAYwAxADEALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACAvTXkgRHJpdmUvUGFwZXJzL0IvYnVjay1zYzExLnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA2AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAY4=}}
@inproceedings{ames:nas11,
Address = {Dalian, China},
Author = {Sasha Ames and Maya B. Gokhale and Carlos Maltzahn},
Booktitle = {NAS 2011},
Date-Added = {2011-05-26 23:15:19 -0700},
Date-Modified = {2011-05-26 23:17:11 -0700},
Keywords = {papers, metadata, graphs, linking, filesystems},
Month = {July 28-30},
Title = {QMDS: A File System Metadata Management Service Supporting a Graph Data Model-based Query Language},
Year = {2011}}
@inproceedings{pineiro:rtas11,
Abstract = {Real-time systems and applications are becoming increasingly complex and often comprise multiple communicating tasks. The management of the individual tasks is well-understood, but the interaction of communicating tasks with different timing characteristics is less well-understood. We discuss several representative inter-task communication flows via reserved memory buffers (possibly interconnected via a real-time network) and present RAD-Flows, a model for managing these interactions. We provide proofs and simulation results demonstrating the correctness and effectiveness of RAD-Flows, allowing system designers to determine the amount of memory required based upon the characteristics of the interacting tasks and to guarantee real-time operation of the system as a whole.},
Address = {Chicago, IL},
Author = {Roberto Pineiro and Kleoni Ioannidou and Carlos Maltzahn and Scott A. Brandt},
Booktitle = {RTAS 2011},
Date-Added = {2010-12-15 12:11:43 -0800},
Date-Modified = {2020-01-05 05:37:41 -0700},
Keywords = {papers, memory, realtime, qos, performance, management},
Month = {April 11-14},
Title = {RAD-FLOWS: Buffering for Predictable Communication},
Year = {2011},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAUUC9waW5laXJvLXJ0YXMxMS5wZGZPEQFoAAAAAAFoAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8ScGluZWlyby1ydGFzMTEucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAVAAAAIAOi86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6UDpwaW5laXJvLXJ0YXMxMS5wZGYADgAmABIAcABpAG4AZQBpAHIAbwAtAHIAdABhAHMAMQAxAC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAlL015IERyaXZlL1BhcGVycy9QL3BpbmVpcm8tcnRhczExLnBkZgAAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOwAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGn}}
@article{maltzahn:login10,
Abstract = {The Hadoop Distributed File System (HDFS) has a single metadata server that sets a hard limit on its maximum size. Ceph, a high-performance distributed file system under development since 2005 and now supported in Linux, bypasses the scaling limits of HDFS. We describe Ceph and its elements and provide instructions for installing a demonstration system that can be used with Hadoop.},
Author = {Carlos Maltzahn and Esteban Molina-Estolano and Amandeep Khurana and Alex J. Nelson and Scott A. Brandt and Sage A. Weil},
Date-Added = {2010-09-30 15:19:48 -0700},
Date-Modified = {2020-01-05 05:43:26 -0700},
Journal = {;login: The USENIX Magazine},
Keywords = {papers, filesystems, parallel, hadoop, mapreduce, storage},
Number = {4},
Title = {Ceph as a Scalable Alternative to the Hadoop Distributed File System},
Volume = {35},
Year = {2010},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAWTS9tYWx0emFobi1sb2dpbjEwLnBkZk8RAXAAAAAAAXAAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////xRtYWx0emFobi1sb2dpbjEwLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABTQAAAgA8LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpNOm1hbHR6YWhuLWxvZ2luMTAucGRmAA4AKgAUAG0AYQBsAHQAegBhAGgAbgAtAGwAbwBnAGkAbgAxADAALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACcvTXkgRHJpdmUvUGFwZXJzL00vbWFsdHphaG4tbG9naW4xMC5wZGYAABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkAD0AAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABsQ==}}
@inproceedings{brandt:pdsw09,
Abstract = {File systems are the backbone of large-scale data processing for scientific applications. Motivated by the need to provide an extensible and flexible framework beyond the abstractions provided by API libraries for files to manage and analyze large-scale data, we are developing Damasc, an enhanced file system where rich data management services for scientific computing are provided as a native part of the file system.
This paper presents our vision for Damasc, a performant file system that would allow scientists or even casual users to pose declarative queries and updates over views of underlying files that are stored in their native bytestream format. In Damasc, a configurable layer is added on top of the file system to expose the contents of files in a logical data model through which views can be defined and used for queries and updates. The logical data model and views are leveraged to optimize access to files through caching and self-organizing indexing. In addition, provenance capture and analysis to file access is also built into Damasc. We describe the salient features of our proposal and discuss how it can benefit the development of scientific code.
},
Address = {Portland, OR},
Author = {Scott A. Brandt and Carlos Maltzahn and Neoklis Polyzotis and Wang-Chiew Tan},
Booktitle = {Proceedings of the 2009 ACM Petascale Data Storage Workshop (PDSW 09)},
Date-Added = {2010-01-26 23:50:43 -0800},
Date-Modified = {2020-01-05 05:49:01 -0700},
Keywords = {papers, datamanagement, filesystems},
Month = {November 15},
Title = {Fusing Data Management Services with File Systems},
Year = {2009},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxATQi9icmFuZHQtcGRzdzA5LnBkZk8RAWQAAAAAAWQAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////xFicmFuZHQtcGRzdzA5LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABQgAAAgA5LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpCOmJyYW5kdC1wZHN3MDkucGRmAAAOACQAEQBiAHIAYQBuAGQAdAAtAHAAZABzAHcAMAA5AC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAkL015IERyaXZlL1BhcGVycy9CL2JyYW5kdC1wZHN3MDkucGRmABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADoAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABog==}}
@inproceedings{estolano:pdsw09,
Abstract = {MapReduce-tailored distributed filesystems---such as HDFS for Hadoop MapReduce---and parallel high-performance computing filesystems are tailored for considerably different workloads. The purpose of our work is to examine the performance of each filesystem when both sorts of workload run on it concurrently.
We examine two workloads on two filesystems. For the HPC workload, we use the IOR checkpointing benchmark and the Parallel Virtual File System, Version 2 (PVFS); for Hadoop, we use an HTTP attack classifier and the CloudStore filesystem. We analyze the performance of each file system when it concurrently runs its ``native'' workload as well as the non-native workload.},
Address = {Portland, OR},
Author = {Esteban Molina-Estolano and Maya Gokhale and Carlos Maltzahn and John May and John Bent and Scott Brandt},
Booktitle = {Proceedings of the 2009 ACM Petascale Data Storage Workshop (PDSW 09)},
Date-Added = {2010-01-03 23:04:09 -0800},
Date-Modified = {2020-01-05 05:51:32 -0700},
Keywords = {papers, performance, hpc, mapreduce, filesystems},
Month = {November 15},
Title = {Mixing Hadoop and HPC Workloads on Parallel Filesystems},
Year = {2009},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAXRS1GL2VzdG9sYW5vLXBkc3cwOS5wZGZPEQFyAAAAAAFyAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8TZXN0b2xhbm8tcGRzdzA5LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAA0UtRgAAAgA9LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpFLUY6ZXN0b2xhbm8tcGRzdzA5LnBkZgAADgAoABMAZQBzAHQAbwBsAGEAbgBvAC0AcABkAHMAdwAwADkALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACgvTXkgRHJpdmUvUGFwZXJzL0UtRi9lc3RvbGFuby1wZHN3MDkucGRmABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkAD4AAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABtA==}}
@inproceedings{bigelow:pdsw07,
Abstract = {Many applications---for example, scientific simulation, real-time data acquisition, and distributed reservation systems---have I/O performance requirements, yet most large, distributed storage systems lack the ability to guarantee I/O performance. We are working on end-to-end performance management in scalable, distributed storage systems. The kinds of storage systems we are targeting include large high-performance computing (HPC) clusters, which require both large data volumes and high I/O rates, as well as large-scale general-purpose storage systems.},
Address = {Reno, NV},
Author = {David Bigelow and Suresh Iyer and Tim Kaldewey and Roberto Pineiro and Anna Povzner and Scott A. Brandt and Richard Golding and Theodore Wong and Carlos Maltzahn},
Booktitle = {Proceedings of the 2007 ACM Petascale Data Storage Workshop (PDSW 07)},
Date-Added = {2009-09-29 12:08:09 -0700},
Date-Modified = {2020-01-05 05:56:32 -0700},
Keywords = {papers, performance, management, distributed, storage, scalable},
Title = {End-to-end Performance Management for Scalable Distributed Storage},
Year = {2007},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAUQi9iaWdlbG93LXBkc3cwNy5wZGZPEQFoAAAAAAFoAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8SYmlnZWxvdy1wZHN3MDcucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAUIAAAIAOi86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6QjpiaWdlbG93LXBkc3cwNy5wZGYADgAmABIAYgBpAGcAZQBsAG8AdwAtAHAAZABzAHcAMAA3AC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAlL015IERyaXZlL1BhcGVycy9CL2JpZ2Vsb3ctcGRzdzA3LnBkZgAAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOwAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGn}}
@inproceedings{buck:dadc09,
Abstract = {High-end computing is increasingly I/O bound as computations become more data-intensive, and data transport technologies struggle to keep pace with the demands of large-scale, distributed computations. One approach to avoiding unnecessary I/O is to move the processing to the data, as seen in Google's successful, but relatively specialized, MapReduce system. This paper discusses our investigation towards a general solution for enabling in-situ computation in a peta-scale storage system. We believe our work with flexible, application-specific structured storage is the key to addressing the I/O overhead caused by data partitioning across storage nodes. In order to manage competing workloads on storage nodes, our research in system performance management is leveraged. Our ultimate goal is a general framework for in-situ data-intensive processing, indexing, and searching, which we expect to provide orders of magnitude performance increases for data-intensive workloads.},
Address = {Munich, Germany},
Author = {Joe Buck and Noah Watkins and Carlos Maltzahn and Scott A. Brandt},
Booktitle = {2nd International Workshop on Data-Aware Distributed Computing (in conjunction with HPDC-18)},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:01:11 -0700},
Keywords = {papers, filesystems, programmable},
Month = {June 9},
Title = {Abstract Storage: Moving file format-specific abstractions into petabyte-scale storage systems},
Year = {2009},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxARQi9idWNrLWRhZGMwOS5wZGZPEQFcAAAAAAFcAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8PYnVjay1kYWRjMDkucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAUIAAAIANy86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6QjpidWNrLWRhZGMwOS5wZGYAAA4AIAAPAGIAdQBjAGsALQBkAGEAZABjADAAOQAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAIi9NeSBEcml2ZS9QYXBlcnMvQi9idWNrLWRhZGMwOS5wZGYAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOAAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGY}}
@inproceedings{brandt:ospert08,
Abstract = {Real-time systems are growing in size and complexity and must often manage multiple competing tasks in environments where CPU is not the only limited shared resource. Memory, network, and other devices may also be shared and system-wide performance guarantees may require the allocation and scheduling of many diverse resources. We present our on-going work on performance management in a representative distributed real-time system---a distributed storage system with performance requirements---and discuss our integrated model for managing diverse resources to provide end-to-end performance guarantees.
},
Address = {Prague, Czech Republic},
Author = {Scott A. Brandt and Carlos Maltzahn and Anna Povzner and Roberto Pineiro and Andrew Shewmaker and Tim Kaldewey},
Booktitle = {OSPERT 2008},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:01:44 -0700},
Keywords = {papers, storage, systems, distributed, performance, management, qos, realtime},
Month = {July},
Title = {An Integrated Model for Performance Management in a Distributed System},
Year = {2008},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAVQi9icmFuZHQtb3NwZXJ0MDgucGRmTxEBbAAAAAABbAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////E2JyYW5kdC1vc3BlcnQwOC5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFCAAACADsvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOkI6YnJhbmR0LW9zcGVydDA4LnBkZgAADgAoABMAYgByAGEAbgBkAHQALQBvAHMAcABlAHIAdAAwADgALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACYvTXkgRHJpdmUvUGFwZXJzL0IvYnJhbmR0LW9zcGVydDA4LnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA8AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAaw=}}
@article{estolano:jpcs09,
Abstract = {Parallel file systems are gaining in popularity in high-end computing centers as well as commercial data centers. High-end computing systems are expected to scale exponentially and to pose new challenges to their storage scalability in terms of cost and power. To address these challenges scientists and file system designers will need a thorough understanding of the design space of parallel file systems. Yet there exist few systematic studies of parallel file system behavior at petabyte- and exabyte scale. An important reason is the significant cost of getting access to large-scale hardware to test parallel file systems. To contribute to this understanding we are building a parallel file system simulator that can simulate parallel file systems at very large scale. Our goal is to simulate petabyte-scale parallel file systems on a small cluster or even a single machine in reasonable time and fidelity. With this simulator, file system experts will be able to tune existing file systems for specific workloads, scientists and file system deployment engineers will be able to better communicate workload requirements, file system designers and researchers will be able to try out design alternatives and innovations at scale, and instructors will be able to study very large-scale parallel file system behavior in the class room. In this paper we describe our approach and provide preliminary results that are encouraging both in terms of fidelity and simulation scalability.},
Author = {Esteban Molina-Estolano and Carlos Maltzahn and John Bent and Scott A. Brandt},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:02:20 -0700},
Journal = {J. Phys.: Conf. Ser.},
Keywords = {papers, performance, simulation, filesystems},
Number = {012050},
Title = {Building a Parallel File System Simulator},
Volume = {126},
Year = {2009},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAXRS1GL2VzdG9sYW5vLWpwY3MwOS5wZGZPEQFyAAAAAAFyAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8TZXN0b2xhbm8tanBjczA5LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAA0UtRgAAAgA9LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpFLUY6ZXN0b2xhbm8tanBjczA5LnBkZgAADgAoABMAZQBzAHQAbwBsAGEAbgBvAC0AagBwAGMAcwAwADkALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACgvTXkgRHJpdmUvUGFwZXJzL0UtRi9lc3RvbGFuby1qcGNzMDkucGRmABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkAD4AAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABtA==}}
@inproceedings{weil:osdi06,
Abstract = {We have developed Ceph, a distributed file system that provides excellent performance, reliability, and scalability. Ceph maximizes the separation between data and metadata management by replacing allocation tables with a pseudo-random data distribution function (CRUSH) designed for heterogeneous and dynamic clusters of unreliable object storage devices (OSDs). We leverage device intelligence by distributing data replication, failure detection and recovery to semi-autonomous OSDs running a specialized local object file system. A dynamic distributed metadata cluster provides extremely efficient metadata management and seamlessly adapts to a wide range of general purpose and scientific computing file system workloads. Performance measurements under a variety of workloads show that Ceph has excellent I/O performance and scalable metadata management, supporting more than 250,000 metadata operations per second.
},
Address = {Seattle, WA},
Author = {Sage A. Weil and Scott A. Brandt and Ethan L. Miller and Darrell D. E. Long and Carlos Maltzahn},
Booktitle = {OSDI'06},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:03:57 -0700},
Keywords = {papers, parallel, filesystems, distributed, storage, systems, obsd, p2p},
Month = {November},
Read = {1},
Title = {{Ceph}: A Scalable, High-Performance Distributed File System},
Year = 2006,
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxARVy93ZWlsLW9zZGkwNi5wZGZPEQFcAAAAAAFcAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8Pd2VpbC1vc2RpMDYucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAVcAAAIANy86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6Vzp3ZWlsLW9zZGkwNi5wZGYAAA4AIAAPAHcAZQBpAGwALQBvAHMAZABpADAANgAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAIi9NeSBEcml2ZS9QYXBlcnMvVy93ZWlsLW9zZGkwNi5wZGYAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOAAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGY}}
@inproceedings{maltzahn:chi95,
Abstract = {In a research community each researcher knows only a small fraction of the vast number of tools offered in the continually changing environment of local computer networks. Since the on-line or off-line documentation for these tools poorly supports people in finding the best tool for a given task, users prefer to ask colleagues. However, finding the right person to ask can be time consuming and asking questions can reveal incompetence. In this paper we present an architecture for a community-sensitive help system which actively collects information about Unix tools by tapping into accounting information generated by the operating system and by interviewing users that are selected on the basis of collected information. The result is a help system that continually seeks to update itself, that contains information that is entirely based on the community's perspective on tools, and that consequently grows with the community and its dynamic environments.},
Address = {Denver, CO},
Author = {Carlos Maltzahn},
Booktitle = {CHI '95},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:06:12 -0700},
Keywords = {papers, cscw},
Month = {May},
Title = {Community Help: Discovering Tools and Locating Experts in a Dynamic Environment},
Year = {1995},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAUTS9tYWx0emFobi1jaGk5NS5wZGZPEQFoAAAAAAFoAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8SbWFsdHphaG4tY2hpOTUucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAU0AAAIAOi86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6TTptYWx0emFobi1jaGk5NS5wZGYADgAmABIAbQBhAGwAdAB6AGEAaABuAC0AYwBoAGkAOQA1AC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAlL015IERyaXZlL1BhcGVycy9NL21hbHR6YWhuLWNoaTk1LnBkZgAAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOwAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGn}}
@inproceedings{weil:sc06,
Abstract = {Emerging large-scale distributed storage systems are faced with the task of distributing petabytes of data among tens or hundreds of thousands of storage devices. Such systems must evenly distribute data and workload to efficiently utilize available resources and maximize system performance, while facilitating system growth and managing hardware failures. We have developed CRUSH, a scalable pseudo-random data distribution function designed for distributed object-based storage systems that efficiently maps data objects to storage devices without relying on a central directory. Because large systems are inherently dynamic, CRUSH is designed to facilitate the addition and removal of storage while minimizing unnecessary data movement. The algorithm accommodates a wide variety of data replication and reliability mechanisms and distributes data in terms of user-defined policies that enforce separation of replicas across failure domains.},
Address = {Tampa, FL},
Author = {Sage A. Weil and Scott A. Brandt and Ethan L. Miller and Carlos Maltzahn},
Booktitle = {SC '06},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:10:11 -0700},
Keywords = {papers, hashing, parallel, filesystems, placement, related:ceph, obsd},
Month = {November},
Publisher = {ACM},
Title = {{CRUSH}: Controlled, Scalable, Decentralized Placement of Replicated Data},
Year = {2006},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAPVy93ZWlsLXNjMDYucGRmTxEBVAAAAAABVAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////DXdlaWwtc2MwNi5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFXAAACADUvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOlc6d2VpbC1zYzA2LnBkZgAADgAcAA0AdwBlAGkAbAAtAHMAYwAwADYALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACAvTXkgRHJpdmUvUGFwZXJzL1cvd2VpbC1zYzA2LnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA2AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAY4=}}
@article{povzner:osr08,
Abstract = {Guaranteed I/O performance is needed for a variety of applications ranging from real-time data collection to desktop multimedia to large-scale scientific simulations. Reservations on throughput, the standard measure of disk performance, fail to effectively manage disk performance due to the orders of magnitude difference between best-, average-, and worst-case response times, allowing reservation of less than 0.01% of the achievable bandwidth. We show that by reserving disk resources in terms of utilization it is possible to create a disk scheduler that supports reservation of nearly 100% of the disk resources, provides arbitrarily hard or soft guarantees depending upon application needs, and yields efficiency as good or better than best-effort disk schedulers tuned for performance. We present the architecture of our scheduler, prove the correctness of its algorithms, and provide results demonstrating its effectiveness.},
Author = {Anna Povzner and Tim Kaldewey and Scott A. Brandt and Richard Golding and Theodore Wong and Carlos Maltzahn},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:12:06 -0700},
Journal = {Operating Systems Review},
Keywords = {papers, predictable, performance, storage, media, realtime},
Month = {May},
Number = {4},
Pages = {13-25},
Title = {Efficient Guaranteed Disk Request Scheduling with Fahrrad},
Volume = {42},
Year = {2008},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxATUC9wb3Z6bmVyLW9zcjA4LnBkZk8RAWQAAAAAAWQAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////xFwb3Z6bmVyLW9zcjA4LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABUAAAAgA5LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpQOnBvdnpuZXItb3NyMDgucGRmAAAOACQAEQBwAG8AdgB6AG4AZQByAC0AbwBzAHIAMAA4AC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAkL015IERyaXZlL1BhcGVycy9QL3BvdnpuZXItb3NyMDgucGRmABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADoAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABog==}}
@inproceedings{povzner:eurosys08,
Abstract = {Guaranteed I/O performance is needed for a variety of applications ranging from real-time data collection to desktop multimedia to large-scale scientific simulations. Reservations on throughput, the standard measure of disk performance, fail to effectively manage disk performance due to the orders of magnitude difference between best-, average-, and worst-case response times, allowing reservation of less than 0.01% of the achievable bandwidth. We show that by reserving disk resources in terms of utilization it is possible to create a disk scheduler that supports reservation of nearly 100% of the disk resources, provides arbitrarily hard or soft guarantees depending upon application needs, and yields efficiency as good or better than best-effort disk schedulers tuned for performance. We present the architecture of our scheduler, prove the correctness of its algorithms, and provide results demonstrating its effectiveness.},
Address = {Glasgow, Scotland},
Author = {Anna Povzner and Tim Kaldewey and Scott A. Brandt and Richard Golding and Theodore Wong and Carlos Maltzahn},
Booktitle = {Eurosys 2008},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:12:26 -0700},
Keywords = {papers, performance, management, storage, systems, fahrrad, rbed, realtime, qos},
Month = {March 31 - April 4},
Title = {Efficient Guaranteed Disk Request Scheduling with Fahrrad},
Year = {2008},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAXUC9wb3Z6bmVyLWV1cm9zeXMwOC5wZGZPEQF0AAAAAAF0AAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8VcG92em5lci1ldXJvc3lzMDgucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAVAAAAIAPS86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6UDpwb3Z6bmVyLWV1cm9zeXMwOC5wZGYAAA4ALAAVAHAAbwB2AHoAbgBlAHIALQBlAHUAcgBvAHMAeQBzADAAOAAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAKC9NeSBEcml2ZS9QYXBlcnMvUC9wb3Z6bmVyLWV1cm9zeXMwOC5wZGYAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAPgAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAG2}}
@article{maltzahn:ddas07,
Abstract = {Managing storage in the face of relentless growth in the number and variety of files on storage systems creates demand for rich file system metadata as is made evident by the recent emergence of rich metadata support in many applications as well as file systems. Yet, little support exists for sharing metadata across file systems even though it is not uncommon for users to manage multiple file systems and to frequently share copies of files across devices and with other users. Encouraged by the surge in popularity for collaborative bookmarking sites that share the burden of creating metadata for online content [21] we present Graffiti, a distributed organization layer for collaboratively sharing rich metadata across heterogeneous file systems. The primary purpose of Graffiti is to provide a research and rapid prototyping platform for managing metadata across file systems and users.},
Author = {Carlos Maltzahn and Nikhil Bobb and Mark W. Storer and Damian Eads and Scott A. Brandt and Ethan L. Miller},
Booktitle = {Distributed Data \& Structures 7},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:13:12 -0700},
Editor = {Thomas Schwarz},
Journal = {Proceedings in Informatics},
Keywords = {papers, pim, tagging, distributed, naming, linking, metadata},
Pages = {97-111},
Publisher = {Carleton Scientific},
Read = {Yes},
Title = {Graffiti: A Framework for Testing Collaborative Distributed Metadata},
Volume = {21},
Year = {2007},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAVTS9tYWx0emFobi1kZGFzMDcucGRmTxEBbAAAAAABbAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////E21hbHR6YWhuLWRkYXMwNy5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFNAAACADsvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOk06bWFsdHphaG4tZGRhczA3LnBkZgAADgAoABMAbQBhAGwAdAB6AGEAaABuAC0AZABkAGEAcwAwADcALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACYvTXkgRHJpdmUvUGFwZXJzL00vbWFsdHphaG4tZGRhczA3LnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA8AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAaw=}}
@inproceedings{rose:caise92,
Abstract = {Repositories provide the information system's support to layer software environments. Initially, repository technology has been dominated by object representation issues. Teams are not part of the ball game. In this paper, we propose the concept of sharing processes which supports distribution and sharing of objects and tasks by teams. Sharing processes are formally specified as classes of non-deterministic finite automata connected to each other by deduction rules. They are intended to coordinate object access and communication for task distribution in large development projects. In particular, we show how interactions between both sharings improve object management.},
Address = {Manchester, UK},
Author = {Thomas Rose and Carlos Maltzahn and Matthias Jarke},
Booktitle = {Advanced Information Systems Engineering (CAiSE'92)},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:14:53 -0700},
Editor = {Pericles Loucopoulos},
Keywords = {papers, sharing, cscw, datamanagement},
Month = {May 12--15},
Pages = {17--32},
Publisher = {Springer Berlin / Heidelberg},
Series = {Lecture Notes in Computer Science},
Title = {Integrating object and agent worlds},
Volume = {593},
Year = {1992},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAUUS1SL3Jvc2UtY2Fpc2U5Mi5wZGZPEQFmAAAAAAFmAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8Qcm9zZS1jYWlzZTkyLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAA1EtUgAAAgA6LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpRLVI6cm9zZS1jYWlzZTkyLnBkZgAOACIAEAByAG8AcwBlAC0AYwBhAGkAcwBlADkAMgAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAJS9NeSBEcml2ZS9QYXBlcnMvUS1SL3Jvc2UtY2Fpc2U5Mi5wZGYAABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADsAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABpQ==}}
@inproceedings{ames:mss06,
Abstract = {As the number and variety of files stored and accessed by a typical user has dramatically increased, existing file system structures have begun to fail as a mechanism for managing all of the information contained in those files. Many applications---email clients, multimedia management applications, and desktop search engines are examples--- have been forced to develop their own richer metadata infrastructures. While effective, these solutions are generally non-standard, non-portable, non-sharable across applications, users or platforms, proprietary, and potentially inefficient. In the interest of providing a rich, efficient, shared file system metadata infrastructure, we have developed the Linking File System (LiFS). Taking advantage of non-volatile storage class memories, LiFS supports a wide variety of user and application metadata needs while efficiently supporting traditional file system operations.},
Address = {College Park, MD},
Author = {Sasha Ames and Nikhil Bobb and Kevin M. Greenan and Owen S. Hofmann and Mark W. Storer and Carlos Maltzahn and Ethan L. Miller and Scott A. Brandt},
Booktitle = {MSST '06},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:15:24 -0700},
Keywords = {papers, linking, systems, storage, metadata, storagemedium, related:quasar, filesystems},
Local-Url = {/Users/carlosmalt/Documents/Papers/ames-mss06.pdf},
Month = {May},
Organization = {IEEE},
Title = {{LiFS}: An Attribute-Rich File System for Storage Class Memories},
Year = {2006},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAQQS9hbWVzLW1zczA2LnBkZk8RAVgAAAAAAVgAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////w5hbWVzLW1zczA2LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABQQAAAgA2LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpBOmFtZXMtbXNzMDYucGRmAA4AHgAOAGEAbQBlAHMALQBtAHMAcwAwADYALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACEvTXkgRHJpdmUvUGFwZXJzL0EvYW1lcy1tc3MwNi5wZGYAABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADcAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABkw==}}
@inproceedings{maltzahn:wcw99,
Abstract = {The bandwidth usage due to HTTP traffic often varies considerably over the course of a day, requiring high network performance during peak periods while leaving network resources unused during off-peak periods. We show that using these extra network resources to prefetch web content during off-peak periods can significantly reduce peak bandwidth usage without compromising cache consistency. With large HTTP traffic variations it is therefore feasible to apply ``bandwidth smoothing'' to reduce the cost and the required capacity of a network infrastructure. In addition to reducing the peak network demand, bandwidth smoothing improves cache hit rates. We apply machine learning techniques to automatically develop prefetch strategies that have high accuracy. Our results are based on web proxy traces generated at a large corporate Internet exchange point and data collected from recent scans of popular web sites.},
Address = {San Diego, CA},
Author = {Carlos Maltzahn and Kathy Richardson and Dirk Grunwald and James Martin},
Booktitle = {4th International Web Caching Workshop (WCW'99)},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:17:30 -0700},
Keywords = {papers, networking, intermediary, machinelearning, webcaching},
Month = {March 31 - April 2},
Title = {On Bandwidth Smoothing},
Year = {1999},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAUTS9tYWx0emFobi13Y3c5OS5wZGZPEQFoAAAAAAFoAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8SbWFsdHphaG4td2N3OTkucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAU0AAAIAOi86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6TTptYWx0emFobi13Y3c5OS5wZGYADgAmABIAbQBhAGwAdAB6AGEAaABuAC0AdwBjAHcAOQA5AC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAlL015IERyaXZlL1BhcGVycy9NL21hbHR6YWhuLXdjdzk5LnBkZgAAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOwAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGn}}
@article{maltzahn:per97,
Abstract = {Enterprise level web proxies relay world-wide web traffic between private networks and the Internet. They improve security, save network bandwidth, and reduce network latency. While the performance of web proxies has been analyzed based on synthetic workloads, little is known about their performance on real workloads. In this paper we present a study of two web proxies (CERN and Squid) executing real workloads on Digital's Palo Alto Gateway. We demonstrate that the simple CERN proxy architecture outperforms all but the latest version of Squid and continues to outperform cacheless configurations. For the measured load levels the Squid proxy used at least as many CPU, memory, and disk resources as CERN, in some configurations significantly more resources. At higher load levels the resource utilization requirements will cross and Squid will be the one using fewer resources. Lastly we found that cache hit rates of around 30% had very little effect on the request service time.},
Author = {Carlos Maltzahn and Kathy Richardson and Dirk Grunwald},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:18:29 -0700},
Journal = {ACM SIGMETRICS Performance Evaluation Review},
Keywords = {papers, performance, webcaching, networking, intermediary},
Month = {June},
Number = {1},
Pages = {13-23},
Title = {Performance Issues of Enterprise Level Web Proxies},
Volume = {25},
Year = {1997},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAbTS9tYWx0emFobi1zaWdtZXRyaWNzOTcucGRmTxEBhAAAAAABhAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////GW1hbHR6YWhuLXNpZ21ldHJpY3M5Ny5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFNAAACAEEvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOk06bWFsdHphaG4tc2lnbWV0cmljczk3LnBkZgAADgA0ABkAbQBhAGwAdAB6AGEAaABuAC0AcwBpAGcAbQBlAHQAcgBpAGMAcwA5ADcALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACwvTXkgRHJpdmUvUGFwZXJzL00vbWFsdHphaG4tc2lnbWV0cmljczk3LnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJABCAAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAco=}}
@inproceedings{maltzahn:sigmetrics97,
Abstract = {Enterprise level web proxies relay world-wide web traffic between private networks and the Internet. They improve security, save network bandwidth, and reduce network latency. While the performance of web proxies has been analyzed based on synthetic workloads, little is known about their performance on real workloads. In this paper we present a study of two web proxies (CERN and Squid) executing real workloads on Digital's Palo Alto Gateway. We demonstrate that the simple CERN proxy architecture outperforms all but the latest version of Squid and continues to outperform cacheless configurations. For the measured load levels the Squid proxy used at least as many CPU, memory, and disk resources as CERN, in some configurations significantly more resources. At higher load levels the resource utilization requirements will cross and Squid will be the one using fewer resources. Lastly we found that cache hit rates of around 30% had very little effect on the request service time.},
Address = {Seattle, WA},
Author = {Carlos Maltzahn and Kathy Richardson and Dirk Grunwald},
Booktitle = {SIGMETRICS 1997},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:19:28 -0700},
Keywords = {papers, performance, tracing, networking, intermediary, webcaching},
Month = {June 15-18},
Pages = {13--23},
Read = {Yes},
Title = {Performance Issues of Enterprise Level Web Proxies},
Year = {1997},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAbTS9tYWx0emFobi1zaWdtZXRyaWNzOTcucGRmTxEBhAAAAAABhAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////GW1hbHR6YWhuLXNpZ21ldHJpY3M5Ny5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFNAAACAEEvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOk06bWFsdHphaG4tc2lnbWV0cmljczk3LnBkZgAADgA0ABkAbQBhAGwAdAB6AGEAaABuAC0AcwBpAGcAbQBlAHQAcgBpAGMAcwA5ADcALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACwvTXkgRHJpdmUvUGFwZXJzL00vbWFsdHphaG4tc2lnbWV0cmljczk3LnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJABCAAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAco=}}
@inproceedings{weil:pdsw07,
Abstract = {Brick and object-based storage architectures have emerged as a means of improving the scalability of storage clusters. However, existing systems continue to treat storage nodes as passive devices, despite their ability to exhibit significant intelligence and autonomy. We present the design and implementation of RADOS, a reliable object storage service that can scale to many thousands of devices by leveraging the intelligence present in individual storage nodes. RADOS preserves consistent data access and strong safety semantics while allowing nodes to act semi-autonomously to self-manage replication, failure detection, and failure recovery through the use of a small cluster map. Our implementation offers excellent performance, reliability, and scalability while providing clients with the illusion of a single logical object store.},
Address = {Reno, NV},
Author = {Sage A. Weil and Andrew Leung and Scott A. Brandt and Carlos Maltzahn},
Booktitle = {Proceedings of the 2007 ACM Petascale Data Storage Workshop (PDSW 07)},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:20:07 -0700},
Keywords = {papers, obsd, distributed, storage, systems, related:x10},
Local-Url = {/Users/carlosmalt/Documents/Papers/weil-pdsw07.pdf},
Month = {November},
Title = {RADOS: A Fast, Scalable, and Reliable Storage Service for Petabyte-scale Storage Clusters},
Year = {2007},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxARVy93ZWlsLXBkc3cwNy5wZGZPEQFcAAAAAAFcAAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8Pd2VpbC1wZHN3MDcucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAVcAAAIANy86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6Vzp3ZWlsLXBkc3cwNy5wZGYAAA4AIAAPAHcAZQBpAGwALQBwAGQAcwB3ADAANwAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAIi9NeSBEcml2ZS9QYXBlcnMvVy93ZWlsLXBkc3cwNy5wZGYAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAOAAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGY}}
@inproceedings{maltzahn:usenix99,
Abstract = {The dramatic increase of HTTP traffic on the Internet has resulted in wide-spread use of large caching proxy servers as critical Internet infrastructure components. With continued growth the demand for larger caches and higher performance proxies grows as well. The common bottleneck of large caching proxy servers is disk I/O. In this paper we evaluate ways to reduce the amount of required disk I/O. First we compare the file system interactions of two existing web proxy servers, CERN and SQUID. Then we show how design adjustments to the current SQUID cache architecture can dramatically reduce disk I/O. Our findings suggest that two strategies can significantly reduce disk I/O: (1) preserve locality of the HTTP reference stream while translating these references into cache references, and (2) use virtual memory instead of the file system for objects smaller than the system page size. The evaluated techniques reduced disk I/O by 50% to 70%.},
Address = {Monterey, CA},
Author = {Carlos Maltzahn and Kathy Richardson and Dirk Grunwald},
Booktitle = {USENIX ATC '99},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:20:58 -0700},
Keywords = {papers, networking, intermediary, storage, webcaching},
Month = {June 6-11},
Read = {Yes},
Title = {Reducing the Disk I/O of Web Proxy Server Caches},
Year = {1999},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAXTS9tYWx0emFobi11c2VuaXg5OS5wZGZPEQF0AAAAAAF0AAIAAAxHb29nbGUgRHJpdmUAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8VbWFsdHphaG4tdXNlbml4OTkucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAABAAMAAAoAY3UAAAAAAAAAAAAAAAAAAU0AAAIAPS86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6TTptYWx0emFobi11c2VuaXg5OS5wZGYAAA4ALAAVAG0AYQBsAHQAegBhAGgAbgAtAHUAcwBlAG4AaQB4ADkAOQAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAKC9NeSBEcml2ZS9QYXBlcnMvTS9tYWx0emFobi11c2VuaXg5OS5wZGYAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAPgAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAG2}}
@inproceedings{ames:mss05,
Abstract = {Traditional file systems provide a weak and inadequate structure for meaningful representations of file interrelationships and other context-providing metadata. Existing designs, which store additional file-oriented metadata either in a database, on disk, or both are limited by the technologies upon which they depend. Moreover, they do not provide for user-defined relationships among files. To address these issues, we created the Linking File System (LiFS), a file system design in which files may have both arbitrary user- or application-specified attributes, and attributed links between files. In order to assure performance when accessing links and attributes, the system is designed to store metadata in non-volatile memory. This paper discusses several use cases that take advantage of this approach and describes the user-space prototype we developed to test the concepts presented.},
Address = {Monterey, CA},
Author = {Alexander Ames and Nikhil Bobb and Scott A. Brandt and Adam Hiatt and Carlos Maltzahn and Ethan L. Miller and Alisa Neeman and Deepa Tuteja},
Booktitle = {MSST '05},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:21:32 -0700},
Keywords = {papers, ssrc, metadata, filesystems, linking},
Local-Url = {/Users/carlosmalt/Documents/Papers/ames-mss05.pdf},
Month = {April},
Title = {Richer File System Metadata Using Links and Attributes},
Year = {2005},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAQQS9hbWVzLW1zczA1LnBkZk8RAVgAAAAAAVgAAgAADEdvb2dsZSBEcml2ZQAAAAAAAAAAAAAAAAAAAAAAAABCRAAB/////w5hbWVzLW1zczA1LnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAAEAAwAACgBjdQAAAAAAAAAAAAAAAAABQQAAAgA2LzpWb2x1bWVzOkdvb2dsZURyaXZlOk15IERyaXZlOlBhcGVyczpBOmFtZXMtbXNzMDUucGRmAA4AHgAOAGEAbQBlAHMALQBtAHMAcwAwADUALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACEvTXkgRHJpdmUvUGFwZXJzL0EvYW1lcy1tc3MwNS5wZGYAABMAFC9Wb2x1bWVzL0dvb2dsZURyaXZl//8AAAAIAA0AGgAkADcAAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAABkw==}}
@inproceedings{koren:pdsw07,
Abstract = {As users interact with file systems of ever increasing size, it is becoming more difficult for them to familiarize themselves with the entire contents of the file system. In petabyte-scale systems, users must navigate a pool of billions of shared files in order to find the information they are looking for. One way to help alleviate this problem is to integrate navigation and search into a common framework.
One such method is faceted search. This method originated within the information retrieval community, and has proved popular for navigating large repositories, such as those in e-commerce sites and digital libraries. This paper introduces faceted search and outlines several current research directions in adapting faceted search techniques to petabyte-scale file systems.},
Address = {Reno, NV},
Author = {Jonathan Koren and Yi Zhang and Sasha Ames and Andrew Leung and Carlos Maltzahn and Ethan L. Miller},
Booktitle = {Proceedings of the 2007 ACM Petascale Data Storage Workshop (PDSW 07)},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:24:17 -0700},
Keywords = {papers, ir, filesystems, metadata, facets, search},
Month = {November},
Title = {Searching and Navigating Petabyte Scale File Systems Based on Facets},
Year = {2007},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxASSy9rb3Jlbi1wZHN3MDcucGRmTxEBYAAAAAABYAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////EGtvcmVuLXBkc3cwNy5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFLAAACADgvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOks6a29yZW4tcGRzdzA3LnBkZgAOACIAEABrAG8AcgBlAG4ALQBwAGQAcwB3ADAANwAuAHAAZABmAA8AGgAMAEcAbwBvAGcAbABlACAARAByAGkAdgBlABIAIy9NeSBEcml2ZS9QYXBlcnMvSy9rb3Jlbi1wZHN3MDcucGRmAAATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA5AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAZ0=}}
@article{jarke:ijicis92,
Abstract = {Information systems support for design environments emphasizes object management and tends to neglect the growing demand for team support. Process management is often tackled by rigid technological protocols which are likely to get in the way of group productivity and quality. Group tools must be introduced in an unobtrusive way which extends current practice yet provides structure and documentation of development experiences. The concept of sharing processes allows agents to coordinate the sharing of ideas, tasks, and results by interacting protocol automata which can be dynamically adapted to situational requirements. Inconsistency is managed with equal emphasis as consistency. The sharing process approach has been implemented in a system called ConceptTalk which has been experimentally integrated with design environments for information and hypertext systems.},
Author = {Matthias Jarke and Carlos Maltzahn and Thomas Rose},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:25:27 -0700},
Journal = {International Journal of Intelligent and Cooperative Information Systems},
Keywords = {papers, sharing, cscw, datamanagement},
Number = {1},
Pages = {145--167},
Title = {Sharing Processes: Team Coordination in Design Repositories},
Volume = {1},
Year = {1992},
Bdsk-Url-1 = {https://www.worldscientific.com/doi/abs/10.1142/S0218215792000076}}
@inproceedings{ellis:hicss97,
Abstract = {Chautauqua is an exploratory workflow management system designed and implemented within the Collaboration Technology Research group (CTRG) at the University of Colorado. This system represents a tightly knit merger of workflow technology and groupware technology. Chautauqua has been in test usage at the University of Colorado since 1995. This document discusses Chautauqua - its motivation, its design, and its implementation. Our emphasis here is on its novel features, and the techniques for implementing these features.},
Address = {Wailea, Maui, HI},
Author = {Clarence E. Ellis and Carlos Maltzahn},
Booktitle = {30th Hawaii International Conference on System Sciences, Information Systems Track},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:26:44 -0700},
Keywords = {papers, workflow, cscw},
Month = {January},
Title = {The Chautauqua Workflow System},
Year = {1997},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAVRS1GL2VsbGlzLWhpY3NzOTcucGRmTxEBagAAAAABagACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////EWVsbGlzLWhpY3NzOTcucGRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAANFLUYAAAIAOy86Vm9sdW1lczpHb29nbGVEcml2ZTpNeSBEcml2ZTpQYXBlcnM6RS1GOmVsbGlzLWhpY3NzOTcucGRmAAAOACQAEQBlAGwAbABpAHMALQBoAGkAYwBzAHMAOQA3AC4AcABkAGYADwAaAAwARwBvAG8AZwBsAGUAIABEAHIAaQB2AGUAEgAmL015IERyaXZlL1BhcGVycy9FLUYvZWxsaXMtaGljc3M5Ny5wZGYAEwAUL1ZvbHVtZXMvR29vZ2xlRHJpdmX//wAAAAgADQAaACQAPAAAAAAAAAIBAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAGq}}
@inproceedings{kaldewey:rtas08,
Abstract = {Large- and small-scale storage systems frequently serve a mixture of workloads, an increasing number of which require some form of performance guarantee. Providing guaranteed disk performance---the equivalent of a ``virtual disk''---is challenging because disk requests are non-preemptible and their execution times are stateful, partially non-deterministic, and can vary by orders of magnitude. Guaranteeing throughput, the standard measure of disk performance, requires worst-case I/O time assumptions orders of magnitude greater than average I/O times, with correspondingly low performance and poor control of the resource allocation. We show that disk time utilization--- analogous to CPU utilization in CPU scheduling and the only fully provisionable aspect of disk performance---yields greater control, more efficient use of disk resources, and better isolation between request streams than bandwidth or I/O rate when used as the basis for disk reservation and scheduling.},
Address = {St. Louis, Missouri},
Annote = {Springer Journal of Real-Time Systems Award for Best Student Paper},
Author = {Tim Kaldewey and Anna Povzner and Theodore Wong and Richard Golding and Scott A. Brandt and Carlos Maltzahn},
Booktitle = {RTAS 2008},
Date-Added = {2009-09-29 12:06:25 -0700},
Date-Modified = {2020-01-05 06:27:49 -0700},
Keywords = {papers, performance, management, storage, systems, fahrrad, rbed, qos},
Month = {April},
Title = {Virtualizing Disk Performance},
Year = {2008},
Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxAVSy9rYWxkZXdleS1ydGFzMDgucGRmTxEBbAAAAAABbAACAAAMR29vZ2xlIERyaXZlAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////E2thbGRld2V5LXJ0YXMwOC5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAQADAAAKAGN1AAAAAAAAAAAAAAAAAAFLAAACADsvOlZvbHVtZXM6R29vZ2xlRHJpdmU6TXkgRHJpdmU6UGFwZXJzOks6a2FsZGV3ZXktcnRhczA4LnBkZgAADgAoABMAawBhAGwAZABlAHcAZQB5AC0AcgB0AGEAcwAwADgALgBwAGQAZgAPABoADABHAG8AbwBnAGwAZQAgAEQAcgBpAHYAZQASACYvTXkgRHJpdmUvUGFwZXJzL0sva2FsZGV3ZXktcnRhczA4LnBkZgATABQvVm9sdW1lcy9Hb29nbGVEcml2Zf//AAAACAANABoAJAA8AAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAaw=}}