[2022-09-14T05:08:53.350Z] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[2022-09-14T05:08:53.350Z]
[2022-09-14T05:08:53.350Z] answer = 'xro743707'
[2022-09-14T05:08:53.350Z] gateway_client = <py4j.java_gateway.GatewayClient object at 0x7fd774de01f0>
[2022-09-14T05:08:53.350Z] target_id = 'o743706', name = 'collectToPython'
[2022-09-14T05:08:53.350Z]
[2022-09-14T05:08:53.350Z] def get_return_value(answer, gateway_client, target_id=None, name=None):
[2022-09-14T05:08:53.350Z] """Converts an answer received from the Java gateway into a Python object.
[2022-09-14T05:08:53.350Z]
[2022-09-14T05:08:53.350Z] For example, string representation of integers are converted to Python
[2022-09-14T05:08:53.350Z] integer, string representation of objects are converted to JavaObject
[2022-09-14T05:08:53.350Z] instances, etc.
[2022-09-14T05:08:53.350Z]
[2022-09-14T05:08:53.350Z] :param answer: the string returned by the Java gateway
[2022-09-14T05:08:53.350Z] :param gateway_client: the gateway client used to communicate with the Java
[2022-09-14T05:08:53.350Z] Gateway. Only necessary if the answer is a reference (e.g., object,
[2022-09-14T05:08:53.350Z] list, map)
[2022-09-14T05:08:53.350Z] :param target_id: the name of the object from which the answer comes from
[2022-09-14T05:08:53.350Z] (e.g., *object1* in `object1.hello()`). Optional.
[2022-09-14T05:08:53.350Z] :param name: the name of the member from which the answer comes from
[2022-09-14T05:08:53.350Z] (e.g., *hello* in `object1.hello()`). Optional.
[2022-09-14T05:08:53.350Z] """
[2022-09-14T05:08:53.350Z] if is_error(answer)[0]:
[2022-09-14T05:08:53.350Z] if len(answer) > 1:
[2022-09-14T05:08:53.350Z] type = answer[1]
[2022-09-14T05:08:53.350Z] value = OUTPUT_CONVERTER[type](answer[2:], gateway_client)
[2022-09-14T05:08:53.350Z] if answer[1] == REFERENCE_TYPE:
[2022-09-14T05:08:53.350Z] > raise Py4JJavaError(
[2022-09-14T05:08:53.350Z] "An error occurred while calling {0}{1}{2}.\n".
[2022-09-14T05:08:53.350Z] format(target_id, ".", name), value)
[2022-09-14T05:08:53.350Z] E py4j.protocol.Py4JJavaError: An error occurred while calling o743706.collectToPython.
[2022-09-14T05:08:53.350Z] E : org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 12737.0 failed 1 times, most recent failure: Lost task 0.0 in stage 12737.0 (TID 44390) (premerge-ci-2-jenkins-rapids-premerge-github-5618-t450r-fzg9j executor driver): java.lang.IllegalArgumentException: nanos > 999999999 or < 0
[2022-09-14T05:08:53.350Z] E at java.sql.Timestamp.setNanos(Timestamp.java:389)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.serde2.io.TimestampWritable.updateTimestamp(TimestampWritable.java:155)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.serde2.io.TimestampWritable.setInternal(TimestampWritable.java:162)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextTimestamp(RecordReaderImpl.java:437)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextValue(RecordReaderImpl.java:601)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextMap(RecordReaderImpl.java:560)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextValue(RecordReaderImpl.java:609)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.next(RecordReaderImpl.java:126)
[2022-09-14T05:08:53.350Z] E at org.apache.hadoop.hive.ql.io.orc.SparkOrcNewRecordReader.nextKeyValue(SparkOrcNewRecordReader.java:85)
[2022-09-14T05:08:53.350Z] E at org.apache.spark.sql.execution.datasources.RecordReaderIterator.hasNext(RecordReaderIterator.scala:37)
[2022-09-14T05:08:53.350Z] E at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
[2022-09-14T05:08:53.351Z] E at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
[2022-09-14T05:08:53.351Z] E at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:345)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.Task.run(Task.scala:131)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:497)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1439)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:500)
[2022-09-14T05:08:53.351Z] E at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[2022-09-14T05:08:53.351Z] E at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[2022-09-14T05:08:53.351Z] E at java.lang.Thread.run(Thread.java:750)
[2022-09-14T05:08:53.351Z] E
[2022-09-14T05:08:53.351Z] E Driver stacktrace:
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2253)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2202)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2201)
[2022-09-14T05:08:53.351Z] E at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
[2022-09-14T05:08:53.351Z] E at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
[2022-09-14T05:08:53.351Z] E at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2201)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1078)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1078)
[2022-09-14T05:08:53.351Z] E at scala.Option.foreach(Option.scala:407)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1078)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2440)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2382)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2371)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:868)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.SparkContext.runJob(SparkContext.scala:2202)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.SparkContext.runJob(SparkContext.scala:2223)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.SparkContext.runJob(SparkContext.scala:2242)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.SparkContext.runJob(SparkContext.scala:2267)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:390)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.Dataset.$anonfun$collectToPython$1(Dataset.scala:3519)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3687)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
[2022-09-14T05:08:53.351Z] E at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:772)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3685)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:3516)
[2022-09-14T05:08:53.352Z] E at sun.reflect.GeneratedMethodAccessor139.invoke(Unknown Source)
[2022-09-14T05:08:53.352Z] E at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
[2022-09-14T05:08:53.352Z] E at java.lang.reflect.Method.invoke(Method.java:498)
[2022-09-14T05:08:53.352Z] E at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
[2022-09-14T05:08:53.352Z] E at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
[2022-09-14T05:08:53.352Z] E at py4j.Gateway.invoke(Gateway.java:282)
[2022-09-14T05:08:53.352Z] E at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
[2022-09-14T05:08:53.352Z] E at py4j.commands.CallCommand.execute(CallCommand.java:79)
[2022-09-14T05:08:53.352Z] E at py4j.GatewayConnection.run(GatewayConnection.java:238)
[2022-09-14T05:08:53.352Z] E at java.lang.Thread.run(Thread.java:750)
[2022-09-14T05:08:53.352Z] E Caused by: java.lang.IllegalArgumentException: nanos > 999999999 or < 0
[2022-09-14T05:08:53.352Z] E at java.sql.Timestamp.setNanos(Timestamp.java:389)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.serde2.io.TimestampWritable.updateTimestamp(TimestampWritable.java:155)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.serde2.io.TimestampWritable.setInternal(TimestampWritable.java:162)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextTimestamp(RecordReaderImpl.java:437)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextValue(RecordReaderImpl.java:601)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextMap(RecordReaderImpl.java:560)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.nextValue(RecordReaderImpl.java:609)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.next(RecordReaderImpl.java:126)
[2022-09-14T05:08:53.352Z] E at org.apache.hadoop.hive.ql.io.orc.SparkOrcNewRecordReader.nextKeyValue(SparkOrcNewRecordReader.java:85)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.sql.execution.datasources.RecordReaderIterator.hasNext(RecordReaderIterator.scala:37)
[2022-09-14T05:08:53.352Z] E at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
[2022-09-14T05:08:53.352Z] E at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
[2022-09-14T05:08:53.352Z] E at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:345)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.scheduler.Task.run(Task.scala:131)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:497)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1439)
[2022-09-14T05:08:53.352Z] E at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:500)
[2022-09-14T05:08:53.352Z] E at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[2022-09-14T05:08:53.352Z] E at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[2022-09-14T05:08:53.352Z] E ... 1 more
Describe the bug
Integration tests started failing in the test_write cases; this seems related to rapidsai/cudf#11586.
java.lang.IllegalArgumentException: nanos > 999999999 or < 0
Detailed log: see the full pytest/CI output and stack trace above.
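For context, the message in the trace comes directly from java.sql.Timestamp.setNanos, which rejects any nanosecond value outside [0, 999999999]; the Hive ORC reader is handing it an out-of-range value decoded from the written file. A minimal sketch of that contract, driven through PySpark's (private) py4j gateway purely for illustration:

```python
# Illustration only: java.sql.Timestamp.setNanos enforces 0 <= nanos <= 999999999,
# which is exactly the bound the ORC reader violated in the trace above.
from py4j.protocol import Py4JJavaError
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
jvm = spark.sparkContext._jvm  # private API, used here only to poke the JVM class

ts = jvm.java.sql.Timestamp(0)  # the Unix epoch
try:
    ts.setNanos(1_000_000_000)  # one past the legal maximum
except Py4JJavaError as e:
    # prints: java.lang.IllegalArgumentException: nanos > 999999999 or < 0
    print(e.java_exception)
```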
Steps/Code to reproduce bug
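No exact repro was captured in the report. Below is a hypothetical minimal sketch, assuming the bad nanosecond values come from the GPU ORC write path (the suspect change is in cudf's ORC writer, rapidsai/cudf#11586) and surface when the CPU Hive ORC reader decodes the file back; paths, values, and session config are illustrative, and the spark-rapids jar is assumed to be on the classpath.

```python
# Hypothetical repro sketch -- not taken from the original report.
from datetime import datetime
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName("orc-timestamp-roundtrip")
         .config("spark.plugins", "com.nvidia.spark.SQLPlugin")  # RAPIDS Accelerator
         .config("spark.sql.orc.impl", "hive")  # the Hive ORC reader seen in the trace
         .getOrCreate())

# Extreme timestamps are the usual trigger for nanosecond-encoding bugs.
df = spark.createDataFrame(
    [(datetime(1, 1, 1, 0, 0, 0),), (datetime(9999, 12, 31, 23, 59, 59),)],
    ["ts"])
df.write.mode("overwrite").orc("/tmp/orc_ts_roundtrip")  # GPU write

spark.conf.set("spark.rapids.sql.enabled", "false")  # force the CPU read path
# On an affected build, collect() fails with:
#   java.lang.IllegalArgumentException: nanos > 999999999 or < 0
spark.read.orc("/tmp/orc_ts_roundtrip").collect()
```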