python - raise ValueError(item) when trying to display table in SparkSQL


I have the following code using PySpark and SparkSQL. It tries to create a DataFrame, register it as a table, and show the table:

schema = StructType([StructField("int_field", IntegerType()),
                     StructField("string_field", StringType())])

dfRow = sqlContext.createDataFrame(simulation, schema)
dfRow.registerTempTable("myRow")
sqlContext.table("myRow").show()

However, when I try to display the table I get this error:

<ipython-input-18-9105248af2c0> in runSimulationJob(job)
     15   dfRow = sqlContext.createDataFrame(simulation, schema)
     16   dfRow.registerTempTable("myRow")
---> 17   sqlContext.table("myRow").show()
     18
     19

/databricks/spark/python/pyspark/sql/dataframe.py in show(self, n, truncate)
    255         +---+-----+
    256         """
--> 257         print(self._jdf.showString(n, truncate))
    258
    259     def __repr__(self):

/databricks/spark/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py in __call__(self, *args)
    811         answer = self.gateway_client.send_command(command)
    812         return_value = get_return_value(
--> 813             answer, self.gateway_client, self.target_id, self.name)
    814
    815         for temp_arg in temp_args:

/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
     43     def deco(*a, **kw):
     44         try:
---> 45             return f(*a, **kw)
     46         except py4j.protocol.Py4JJavaError as e:
     47             s = e.java_exception.toString()

/databricks/spark/python/lib/py4j-0.9-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    306                 raise Py4JJavaError(
    307                     "An error occurred while calling {0}{1}{2}.\n".
--> 308                     format(target_id, ".", name), value)
    309             else:
    310                 raise Py4JError(

Py4JJavaError: An error occurred while calling o457.showString.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 2.0 failed 1 times, most recent failure: Lost task 0.0 in stage 2.0 (TID 2, localhost): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/databricks/spark/python/pyspark/worker.py", line 111, in main
    process()
  File "/databricks/spark/python/pyspark/worker.py", line 106, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/databricks/spark/python/pyspark/serializers.py", line 263, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "<ipython-input-18-9105248af2c0>", line 7, in <lambda>
  File "<ipython-input-16-880ef66c9ced>", line 2, in simulateLoan
  File "/databricks/spark/python/pyspark/sql/types.py", line 1259, in __getitem__
    raise ValueError(item)
ValueError: id

	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
	at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)
	at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)
	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:72)
	at org.apache.spark.scheduler.Task.run(Task.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:222)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
	at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
	at scala.Option.foreach(Option.scala:236)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1837)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1850)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1863)
	at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:212)
	at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
	at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
	at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
	at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
	at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2086)
	at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1498)
	at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1505)
	at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1375)
	at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1374)
	at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2099)
	at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1374)
	at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1456)
	at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:170)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:497)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)
	at py4j.Gateway.invoke(Gateway.java:259)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.GatewayConnection.run(GatewayConnection.java:209)
	at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/databricks/spark/python/pyspark/worker.py", line 111, in main
    process()
  File "/databricks/spark/python/pyspark/worker.py", line 106, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/databricks/spark/python/pyspark/serializers.py", line 263, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "<ipython-input-18-9105248af2c0>", line 7, in <lambda>
  File "<ipython-input-16-880ef66c9ced>", line 2, in simulateLoan
  File "/databricks/spark/python/pyspark/sql/types.py", line 1259, in __getitem__
    raise ValueError(item)
ValueError: id

	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
	at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)
	at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)
	at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:72)
	at org.apache.spark.scheduler.Task.run(Task.scala:96)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:222)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
	... 1 more

Am I not saving the table correctly?

You must have missed something in the snippet. The error complains that it cannot find id, and there is no id field anywhere in your sample. Note where the traceback points: not at the code you posted, but at line 2 of simulateLoan, which runs inside the lambda that builds simulation.
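That ValueError(item) comes from Row.__getitem__: indexing a Row with a field name it does not carry raises ValueError with that name. A minimal sketch that reproduces it (the row contents here are hypothetical stand-ins, not your actual data):

from pyspark.sql import Row

# a Row carrying only the two fields from your schema
row = Row(int_field=1, string_field='a')

row['int_field']   # fine: returns 1
row['id']          # raises ValueError: id -- no such field on this Row

The snippet you posted is otherwise fine; with sample data that matches the schema it runs end to end: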

from pyspark.sql.types import StringType, StructField, StructType, IntegerType

simulation = [
    (1, 'a'),
    (2, 'b')
]

schema = StructType(
    [StructField("int_field", IntegerType()),
     StructField("string_field", StringType())]
)

dfRow = sqlContext.createDataFrame(simulation, schema)
dfRow.registerTempTable("myRow")
sqlContext.table("myRow").show()

It works fine.
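If it's not obvious which lookup inside simulateLoan is failing, one way to check (a sketch against a hypothetical row, not your data) is to print the fields a Row actually carries before indexing into it:

from pyspark.sql import Row

row = Row(int_field=1, string_field='a')
print(row.__fields__)   # ['int_field', 'string_field'] -- no 'id' here
print(row.asDict())     # {'int_field': 1, 'string_field': 'a'}

row.asDict().get('id') would also return None instead of raising, if you prefer a soft failure while debugging.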

