Initial implementation of HDFSBackedBlockRDD. #15
New file: HDFSBackedBlockRDD.scala (91 lines added)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.streaming.storage.rdd

import scala.reflect.ClassTag

import org.apache.hadoop.conf.Configuration

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.BlockRDD
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.streaming.storage.{FileSegment, HdfsUtils, WriteAheadLogRandomReader}
import org.apache.spark._

private[streaming]
class HDFSBackedBlockRDDPartition(val blockId: BlockId, idx: Int, val segment: FileSegment)
  extends Partition {
  val index = idx
}

private[streaming]
class HDFSBackedBlockRDD[T: ClassTag](
    @transient sc: SparkContext,
    @transient hadoopConfiguration: Configuration,
    @transient override val blockIds: Array[BlockId],
    @transient val segments: Array[FileSegment],
    val storeInBlockManager: Boolean,
    val storageLevel: StorageLevel
  ) extends BlockRDD[T](sc, blockIds) {

  if (blockIds.length != segments.length) {
    throw new IllegalStateException("Number of block ids must be the same as number of segments!")
  }

  // Hadoop Configuration is not serializable, so broadcast it as a SerializableWritable.
  val broadcastedHadoopConf = sc.broadcast(new SerializableWritable(hadoopConfiguration))
    .asInstanceOf[Broadcast[SerializableWritable[Configuration]]]

  override def getPartitions: Array[Partition] = {
    assertValid()
    (0 until blockIds.size).map { i =>
      new HDFSBackedBlockRDDPartition(blockIds(i), i, segments(i))
    }.toArray
  }

  override def compute(split: Partition, context: TaskContext): Iterator[T] = {
    assertValid()
    val hadoopConf = broadcastedHadoopConf.value.value
    val blockManager = SparkEnv.get.blockManager
    val partition = split.asInstanceOf[HDFSBackedBlockRDDPartition]
    val blockId = partition.blockId
    blockManager.get(blockId) match {
      // Data is in the Block Manager, grab it from there.
      case Some(block) =>
        block.data.asInstanceOf[Iterator[T]]
      // Data not found in the Block Manager, grab it from HDFS.
      case None =>
        val reader = new WriteAheadLogRandomReader(partition.segment.path, hadoopConf)
        val dataRead = reader.read(partition.segment)
        reader.close()
        // Currently, we support storing the data in the BM only in serialized form, not in
        // deserialized form.
        if (storeInBlockManager) {
          blockManager.putBytes(blockId, dataRead, storageLevel)
        }
        dataRead.rewind()
        blockManager.dataDeserialize(blockId, dataRead).asInstanceOf[Iterator[T]]
    }
  }

  override def getPreferredLocations(split: Partition): Seq[String] = {
    val partition = split.asInstanceOf[HDFSBackedBlockRDDPartition]
    val locations = getBlockIdLocations()
    locations.getOrElse(partition.blockId,
      HdfsUtils.getBlockLocations(partition.segment.path, hadoopConfiguration)
        .getOrElse(new Array[String](0)).toSeq)
  }
}
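
For context, a minimal usage sketch (not part of this PR) of how the RDD pairs block ids with write ahead log segments, assuming an existing SparkContext sc; the segment path, offsets, and lengths are hypothetical placeholders, and real segments would come from WriteAheadLogWriter.write().

import org.apache.hadoop.conf.Configuration
import org.apache.spark.storage.{BlockId, StorageLevel, StreamBlockId}
import org.apache.spark.streaming.storage.FileSegment

// One segment per block id: blockIds(i) is rebuilt from segments(i) whenever
// the BlockManager no longer holds that block.
val blockIds: Array[BlockId] = Array(StreamBlockId(0, 0), StreamBlockId(0, 1))
val segments = Array(
  new FileSegment("hdfs:///wal/log-0", 0L, 1024),     // hypothetical path/offset/length
  new FileSegment("hdfs:///wal/log-0", 1024L, 1024))
val rdd = new HDFSBackedBlockRDD[String](sc, new Configuration(), blockIds, segments,
  storeInBlockManager = true, StorageLevel.MEMORY_ONLY_SER)
rdd.collect()  // each partition is served from the BlockManager if present, else from its WAL segment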
New file: HDFSBackedBlockRDDSuite.scala (158 lines added)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.streaming.storage.rdd

import java.io.File
import java.util.concurrent.atomic.AtomicInteger

import scala.collection.mutable.ArrayBuffer

import com.google.common.io.Files
import org.apache.hadoop.conf.Configuration
import org.scalatest.{BeforeAndAfter, FunSuite}

import org.apache.spark.storage.{BlockId, StorageLevel, StreamBlockId}
import org.apache.spark.streaming.storage.{FileSegment, WriteAheadLogWriter}
import org.apache.spark.{SparkConf, SparkContext}

class HDFSBackedBlockRDDSuite extends FunSuite with BeforeAndAfter {
  // Name of the framework for the Spark context
  def framework = this.getClass.getSimpleName

  // Master for the Spark context
  def master = "local[2]"

  val conf = new SparkConf()
    .setMaster(master)
    .setAppName(framework)
  val sparkContext = new SparkContext(conf)
  val hadoopConf = new Configuration()
  val blockManager = sparkContext.env.blockManager
  // Since the same BM is reused in all tests, use an atomic int to generate ids
  val idGenerator = new AtomicInteger(0)
  var file: File = null
  var dir: File = null

  before {
    dir = Files.createTempDir()
    file = new File(dir, "BlockManagerWrite")
  }

  after {
    file.delete()
    dir.delete()
  }

  test("Verify all data is available when all data is in BM and HDFS") {
    doTestHDFSBackedRDD(5, 5, 20, 5)
  }

  test("Verify all data is available when all data is in BM but not in HDFS") {
    doTestHDFSBackedRDD(5, 0, 20, 5)
  }

  test("Verify all data is available when all data is in HDFS and no data is in BM") {
    doTestHDFSBackedRDD(0, 5, 20, 5)
  }

  test("Verify part of the data is in BM, and the remaining in HDFS") {
    doTestHDFSBackedRDD(3, 2, 20, 5)
  }

  /**
   * Write a bunch of events into the HDFS-backed block RDD. Put some of the blocks in the
   * BlockManager, so that not all reads have to happen from HDFS.
   * @param writeToBMCount - Number of blocks to also put in the BlockManager
   * @param writeToHDFSCount - Number of blocks to write to HDFS
   * @param total - Total number of Strings to write
   * @param blockCount - Number of blocks to write (therefore, events per block =
   *                     total / blockCount)
   */
  private def doTestHDFSBackedRDD(
      writeToBMCount: Int,
      writeToHDFSCount: Int,
      total: Int,
      blockCount: Int
    ) {
    val countPerBlock = total / blockCount
    val blockIds = (0 until blockCount).map { i =>
      StreamBlockId(idGenerator.incrementAndGet(), idGenerator.incrementAndGet())
    }

    val writtenStrings = generateData(total, countPerBlock)

    if (writeToBMCount != 0) {
      (0 until writeToBMCount).foreach { i =>
        blockManager
          .putIterator(blockIds(i), writtenStrings(i).iterator, StorageLevel.MEMORY_ONLY_SER)
      }
    }

    val segments = new ArrayBuffer[FileSegment]
    if (writeToHDFSCount != 0) {
      // Generate some fake segments for the blocks in BM so the RDD does not complain
      segments ++= generateFakeSegments(writeToBMCount)
      segments ++= writeDataToHDFS(writtenStrings.slice(writeToBMCount, blockCount),
        blockIds.slice(writeToBMCount, blockCount))
Review comment: Correct me if I am missing something, but from the code it seems like you are always writing to HDFS, never writing to the BlockManager, and then using HDFSBackedBlockRDD to read it. It always reads from HDFS. Should not the tests be,

Review comment: Actually, (4) is not required. I don't think we actually need to be resilient against that one. Even (2) is not required, but that can be done to ensure that we always read from the BM if we have to.

Review comment: No, (4) may definitely happen. What if a part of the RDD gets loaded from HDFS back into the BlockManager, and there are more jobs being run on that RDD? What if an interactive job is launched on the RDD while the streaming job is processing that RDD as well?
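To make that last scenario concrete, a short sketch, assuming an rdd built with storeInBlockManager = true (variable names hypothetical):

// First job: the blocks are not in the BlockManager yet, so compute() falls
// back to reading the WAL segments from HDFS and, because
// storeInBlockManager = true, puts the serialized bytes back into the BM.
val firstCount = rdd.count()

// A later job on the same RDD (for example, an interactive query) now finds
// the blocks in the BlockManager and is served from there instead of HDFS --
// data originally written only to HDFS ends up re-cached in the BM.
val secondCount = rdd.count()

The diff continues below.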
    } else {
      segments ++= generateFakeSegments(blockCount)
    }
    val rdd = new HDFSBackedBlockRDD[String](sparkContext, hadoopConf, blockIds.toArray,
      segments.toArray, false, StorageLevel.MEMORY_ONLY)

    val dataFromRDD = rdd.collect()
    // Verify each partition is equal to the data pulled out
    assert(writtenStrings.flatten === dataFromRDD)
  }

  /**
   * Generate a Seq of Seqs of Strings, in which each inner Seq represents the data that
   * goes into one block.
   * @param count - Total number of Strings to generate
   * @param countPerBlock - Number of Strings per block
   * @return - Seq of Seqs of Strings, one inner Seq per block
   */
  private def generateData(
      count: Int,
      countPerBlock: Int
    ): Seq[Seq[String]] = {
    val strings = (0 until count).map { _ => scala.util.Random.nextString(50) }
    strings.grouped(countPerBlock).toSeq
  }

  /**
   * Write each block's data to HDFS through the write ahead log, returning one FileSegment
   * per block written.
   */
  private def writeDataToHDFS(
      blockData: Seq[Seq[String]],
      blockIds: Seq[BlockId]
    ): Seq[FileSegment] = {
    assert(blockData.size === blockIds.size)
    val segments = new ArrayBuffer[FileSegment]()
    val writer = new WriteAheadLogWriter(file.toString, hadoopConf)
    blockData.zip(blockIds).foreach { case (data, id) =>
      segments += writer.write(blockManager.dataSerialize(id, data.iterator))
    }
    writer.close()
    segments
  }

  private def generateFakeSegments(count: Int): Seq[FileSegment] = {
    (0 until count).map { _ => new FileSegment("random", 0L, 0) }
  }
}
Review comment: Actually, I just realized that storing in the block manager will not work efficiently. After the block gets loaded into the BM, getPreferredLocations will not return the updated locations of the blocks, because the lazily initialized locations map is computed only once (back when it had 0 locations). It will work correctly, but not efficiently.

Review comment: We'd have to update the locations to point to the new blocks as well. I will update that in the next PR.

Review comment: Hmm, there are really two ways of doing this: either get a fresh update from the BlockManager using blockIdsToHosts, or just update the RDD's own copy. The second one seems fine to me, since a newly created RDD with the same blocks would get it from the BM anyway.

Review comment: The question is whether updating on every call to getPreferredLocations would cost us any efficiency. Lets punt on this for now.
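
For illustration only, a rough sketch of the first option (asking the BlockManager afresh on every call); blockIdsToHosts is the API named in the discussion above, but its exact signature here is an assumption, and this is not the fix being proposed:

  // Hypothetical sketch: re-resolve a block's locations per call instead of
  // relying on BlockRDD's one-shot, lazily initialized location map.
  override def getPreferredLocations(split: Partition): Seq[String] = {
    val partition = split.asInstanceOf[HDFSBackedBlockRDDPartition]
    // Ask the BlockManager afresh, so blocks re-cached after this RDD was
    // created become visible to the scheduler.
    val fresh = BlockManager.blockIdsToHosts(Array(partition.blockId), SparkEnv.get)
    fresh.getOrElse(partition.blockId,
      HdfsUtils.getBlockLocations(partition.segment.path, hadoopConfiguration)
        .getOrElse(new Array[String](0)).toSeq)
  }

Whether the extra BlockManager round trip per call is acceptable is exactly the efficiency question raised above.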