Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
val fp = "/user/user_id.txt"
// Each line looks like: "<did>\t<k>:<v>,<k>:<v>,..."
// BUG FIX: split("\t"), not split("t") — the backslash was lost in the paste.
// Splitting on the literal letter "t" produces the wrong number of fields and
// the exact-arity pattern `val Array(did, info_s) = ...` then throws
// scala.MatchError (the trace below). The trailing `_*` wildcard also makes the
// extraction tolerant of extra separators.
sc.textFile(fp).map { s =>
  val Array(did, info_s, _*) = s.split("\t")
  val info = info_s.split(",").map { kv =>
    val Array(k, v, _*) = kv.split(":")
    (k, v.toDouble)
  }.toSeq
  (did, info)
}
- scala.MatchError: [Ljava.lang.String;@51443799 (of class [Ljava.lang.String;)
- at com.test.news.IO$$anonfun$1.apply(App.scala:58)
- at com.test.news.IO$$anonfun$1.apply(App.scala:57)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
- at scala.collection.Iterator$class.foreach(Iterator.scala:893)
- at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
- at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48)
- at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:310)
- at scala.collection.AbstractIterator.to(Iterator.scala:1336)
- at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:302)
- at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1336)
- at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:289)
- at scala.collection.AbstractIterator.toArray(Iterator.scala:1336)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$13.apply(RDD.scala:912)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$13.apply(RDD.scala:912)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1916)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1916)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
- at org.apache.spark.scheduler.Task.run(Task.scala:86)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
- at java.lang.Thread.run(Thread.java:745)
// Example of one line's content: id "123" paired with "word:123" K/V payload.
val (id, info) = ("123", "word:123")
// Index-based access avoids the exact-arity Array pattern entirely, so no
// scala.MatchError is possible for unexpected field counts (though an
// ArrayIndexOutOfBoundsException can still occur for truly malformed lines).
// BUG FIX: split("\t"), not split("t") — the backslash was lost in the paste.
sc.textFile("user_id.txt").map { line =>
  val fields = line.split("\t")
  val info = fields(1).split(",").map { kv =>
    val pairs = kv.split(":")
    (pairs(0), pairs(1).toDouble)
  }.toSeq
  (fields(0), info)
}.collect()
// Array[(String, Seq[(String, Double)])] = Array((12334,WrappedArray((this,23.0), (word,21.0), (teacher,23.0))))
// `_*` (sequence wildcard) absorbs any remaining elements, so this pattern
// binds the first two values even when the array is longer than two.
// (`#` is not a Scala comment marker — replaced with `//`.)
val Array(k, v, _*) = Array(1, 2, 3, 4, 5)
// k: Int = 1
// v: Int = 2
// Same pipeline as above, but using `_*` so the Array patterns tolerate extra
// fields instead of throwing scala.MatchError on unexpected arity.
// BUG FIX: split("\t"), not split("t") — the backslash was lost in the paste.
sc.textFile("user_id.txt").map { line =>
  val Array(id, info_s, _*) = line.split("\t")
  val info = info_s.split(",").map { kv =>
    val Array(key, value, _*) = kv.split(":")
    (key, value.toDouble)
  }.toSeq
  (id, info)
}.collect()
// Array[(String, Seq[(String, Double)])] = Array((12334,WrappedArray((this,23.0), (word,21.0), (teacher,23.0))))
- scala> val Array(k, v) = "1,2".split(",")
- k: String = 1
- v: String = 2
- scala> val Array(k, v) = "1,2,3".split(",")
- scala.MatchError: [Ljava.lang.String;@508dec2b (of class [Ljava.lang.String;)
// Explicit match: handle the happy path and report bad input with a clear error.
val (k, v) = kv.split(":") match {
  case Array(f1, f2) => (f1, f2)
  // BUG FIX: `case Array(elems)` matches ONLY a one-element array, so the
  // intended "anything else" branch itself threw scala.MatchError for 0 or 3+
  // elements. A plain binder `other` matches every remaining array. Also added
  // the missing `s` interpolator so ${other.length} is actually spliced in.
  case other => fatal(s"found invalid K/V pair: expected 2 elements, found ${other.length}")
}
- I got an error for the code below (a movie-recommendation program); the error output follows the code.
- import java.util.Properties
- import org.apache.spark.broadcast.Broadcast
- import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
- import org.apache.spark.rdd.RDD
- import org.apache.spark.sql.types.{StringType, StructField, StructType}
- import org.apache.spark.sql.{Row, SQLContext}
- import org.apache.spark.storage.StorageLevel
- import org.apache.spark.{SparkConf, SparkContext}
- import scala.collection.Map
/** One user→movie rating row parsed from user_movies.csv.
 *  Marked `final`: case classes should not be extended. The explicit
 *  `extends scala.Serializable` is redundant (case classes are already
 *  Serializable) but kept for source compatibility with the original. */
final case class MovieRatings(userID: String, movieID: Int, rating: Double) extends scala.Serializable
/** Catalogue entry from hot_movies.csv; MovieID stays a String to match the CSV source. */
final case class Movies(MovieID: String, MovieName: String) extends scala.Serializable
/** ALS-based movie recommender: builds a MatrixFactorizationModel from
 *  hot_movies.csv / user_movies.csv, writes recommendations and viewing
 *  history to HDFS and MySQL.
 *
 *  NOTE(review): mutable shared state (`var`s) works for a single-threaded
 *  driver but consider threading these through as parameters instead.
 */
object RecommendMovie {
  var sqlContext: SQLContext = _
  var BMovieAndName: Broadcast[Map[String, String]] = _
  var userIDToInt: RDD[(String, Long)] = _
  var model: MatrixFactorizationModel = _

  /** Entry point. Assumes `sc` (SparkContext) is already in scope
   *  (spark-shell / REPL); uncomment the block below for spark-submit use. */
  def main1(args: Array[String]): Unit = {
    /* val conf = new SparkConf().setAppName("RecommendMovies")
    val sc = new SparkContext(conf)
    sqlContext = new SQLContext(sc) */
    val HotMovies = sc.textFile("/home/cloudera/machine_learning/hot_movies.csv")
    val UserMovies = sc.textFile("/home/cloudera/machine_learning/user_movies.csv")
    MovieModel(sc, HotMovies, UserMovies)
    val username = if (args.length > 0) args(0) else "wangymm"
    val userIDMap: Map[String, Int] = userIDToInt.collectAsMap().map { case (s, l) => (s, l.toInt) }
    RecommendByName(username, UserMovies, BMovieAndName, userIDMap, model)
    sc.stop()
  }

  /** Parses "movieID,pref,movieName" lines into [[Movies]].
   *  BUG FIX: the original `val Array(movieID, pref, movieName) = line.split(',')`
   *  threw scala.MatchError (the posted stack trace) on any line that does not
   *  split into exactly 3 fields, and returned `null` for empty IDs, leaving
   *  nulls in the RDD. Malformed rows are now dropped instead. */
  def BuildMovies(HotMovies: RDD[String]): RDD[Movies] = {
    HotMovies.flatMap { line =>
      line.split(',') match {
        case Array(movieID, _, movieName) if movieID.nonEmpty => Some(Movies(movieID, movieName))
        case _                                                => None // skip malformed / empty-ID rows
      }
    }
  }

  /** Parses "userID,movieID,count" lines into [[MovieRatings]], dropping
   *  malformed rows (was a MatchError hazard, same as BuildMovies). */
  def BuildRating(UserMovies: RDD[String]): RDD[MovieRatings] = {
    UserMovies.flatMap { line =>
      line.split(',').map(_.trim) match {
        case Array(userID, moviesID, countStr) =>
          val raw = countStr.toInt
          val count = if (raw == -1) 3 else raw // -1 means "watched, no rating": default to 3
          Some(MovieRatings(userID, moviesID.toInt, count))
        case _ => None
      }
    }
  }

  /** Trains the ALS model and writes the top-5 recommendations per user to
   *  HDFS and to the MySQL table retail_db. */
  def MovieModel(sc: SparkContext, HotMovies: RDD[String], UserMovies: RDD[String]): Unit = {
    val MovieAndName = BuildMovies(HotMovies)
    BMovieAndName = sc.broadcast(MovieAndName.map(m => (m.MovieID, m.MovieName)).collectAsMap())
    val MovieRatingResult = BuildRating(UserMovies)
    // ALS needs integer user IDs; assign one per distinct String user ID.
    userIDToInt = MovieRatingResult.map(_.userID).distinct().zipWithUniqueId()
    val reverseUserID: RDD[(Int, String)] = userIDToInt.map { case (s, l) => (l.toInt, s) }
    val userIDMap: Map[String, Int] = userIDToInt.collectAsMap().map { case (s, l) => (s, l.toInt) }
    val BUserIDMap = sc.broadcast(userIDMap)
    val BreverseUserID = sc.broadcast(reverseUserID.collectAsMap())
    // BUG FIX: the original had ".persist" on one line and
    // "(StorageLevel.MEMORY_AND_DISK)" on the next; semicolon inference parsed
    // them as TWO statements, so the storage level was silently never applied.
    val rating: RDD[Rating] =
      MovieRatingResult
        .map(r => Rating(BUserIDMap.value(r.userID), r.movieID, r.rating))
        .persist(StorageLevel.MEMORY_AND_DISK)
    model = ALS.train(rating, 50, 10, 0.0001)
    rating.unpersist()
    val allRecommends = model.recommendProductsForUsers(5).map {
      case (userid, recommends) =>
        // "product:name,product:name,..." — mkString replaces the manual
        // append-comma / strip-trailing-comma loop of the original.
        val recommendStr = recommends
          .map(r => r.product + ":" + BMovieAndName.value.getOrElse(r.product.toString, ""))
          .mkString(",")
        (BreverseUserID.value(userid), recommendStr)
    }
    allRecommends.saveAsTextFile("/home/cloudera/machine_learning/result.csv")
    HistoryMovies(sc, HotMovies, UserMovies)
    model.userFeatures.unpersist()
    model.productFeatures.unpersist()
    val resultdata = sc.textFile("/home/cloudera/machine_learning/result.csv").map(_.split(","))
    val schema = StructType(
      List(
        StructField("userID", StringType, false),
        StructField("movie01", StringType, false),
        StructField("movie02", StringType, false),
        StructField("movie03", StringType, false),
        StructField("movie04", StringType, false),
        StructField("movie05", StringType, false)
      )
    )
    // Saved lines look like "(userID,rec1,rec2,...)" (tuple toString), so
    // substring(1) strips the leading "(".
    val rows = resultdata.map { line =>
      Row(line(0).substring(1).trim, line(1).trim, line(2).trim, line(3).trim, line(4).trim, line(5).trim)
    }
    val dataRDD = sqlContext.createDataFrame(rows, schema)
    val prop = new Properties()
    prop.put("user", "root")
    prop.put("password", "cloudera")
    dataRDD.write.mode("overwrite").jdbc("jdbc:mysql://localhost:3306/retail_db?characterEncoding=utf8", "retail_db", prop)
  }

  /** Computes the top-5 recommendations for one user and the set of movies
   *  they have already seen.
   *  NOTE(review): both results are computed but never used or returned —
   *  presumably the filtering/printing step is still TODO; confirm intent. */
  def RecommendByName(UserName: String, UserMovies: RDD[String], BMovieAndName: Broadcast[Map[String, String]], userIDMap: Map[String, Int], model:
  MatrixFactorizationModel): Unit = {
    // Direct map lookup replaces filter(...).values.head; still throws
    // NoSuchElementException for an unknown user, as before.
    val UserID = userIDMap(UserName)
    val recommendations = model.recommendProducts(UserID, 5)
    val RecommendMovieResult = recommendations.map(_.product).toSet
    // collect with a guarded pattern: drops malformed lines instead of
    // throwing scala.MatchError like the original `map { case Array(...) }`.
    val UserSeen = UserMovies.map(_.split(",")).collect {
      case arr @ Array(user, _, _) if user.trim == UserName => arr
    }
    val MovieSeen = UserSeen.map { case Array(_, movieid, _) => movieid.toInt }.collect().toSet
  }

  /** Writes each user's comma-joined viewing history (by movie name) to HDFS
   *  and to the MySQL table history1. */
  def HistoryMovies(sc: SparkContext, HotMovies: RDD[String], UserMovies: RDD[String]): Unit = {
    // collect with a partial function skips malformed lines (MatchError fix).
    val UserHaveSeen: RDD[(String, String)] = UserMovies.map(_.split(",")).collect {
      case Array(userid, movieid, _) => (userid, movieid)
    }
    // (The original also computed an unused per-user reduceByKey here; removed.)
    val reverse = UserHaveSeen.map { case (u, m) => (m, u) }
    val UserAndName: RDD[(String, String)] = HotMovies.map(_.split(",")).collect {
      case Array(movieid, pref, moviename) => (movieid, moviename)
    }
    val resultRDD = reverse.join(UserAndName).map(_._2).reduceByKey((x, y) => x + "," + y)
    resultRDD.saveAsTextFile("/home/cloudera/machine_learning/history.csv")
    val schema = StructType(
      List(
        StructField("userid", StringType, false),
        StructField("historymovies", StringType, false)
      )
    )
    val rows = resultRDD.map { case (key, values) => Row(key.trim, values.trim) }
    val dataRDD = sqlContext.createDataFrame(rows, schema)
    val prop = new Properties()
    prop.put("user", "root")
    prop.put("password", "cloudera")
    dataRDD.write.mode("overwrite").jdbc("jdbc:mysql://localhost:3306/retail_db?characterEncoding=utf8", "history1", prop)
  }
}
- Got an error:
- 18/06/01 07:37:10 ERROR executor.Executor: Exception in task 0.0 in stage 3.0 (TID 3)
- scala.MatchError: [Ljava.lang.String;@2429eb1 (of class [Ljava.lang.String;)
- at $line126.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at $line126.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$class.foreach(Iterator.scala:727)
- at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
- at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
- at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
- at scala.collection.AbstractIterator.to(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
- at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
- at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:242)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
- at java.lang.Thread.run(Thread.java:745)
- 18/06/01 07:37:10 WARN scheduler.TaskSetManager: Lost task 0.0 in stage 3.0 (TID 3, localhost, executor driver): scala.MatchError: [Ljava.lang.String;@2429eb1 (of class [Ljava.lang.String;)
- at $line126.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at $line126.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$class.foreach(Iterator.scala:727)
- at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
- at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
- at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
- at scala.collection.AbstractIterator.to(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
- at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
- at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:242)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
- at java.lang.Thread.run(Thread.java:745)
- 18/06/01 07:37:10 ERROR scheduler.TaskSetManager: Task 0 in stage 3.0 failed 1 times; aborting job
- org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 1 times, most recent failure: Lost task 0.0 in stage 3.0 (TID 3, localhost, executor driver): scala.MatchError: [Ljava.lang.String;@2429eb1 (of class [Ljava.lang.String;)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$class.foreach(Iterator.scala:727)
- at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
- at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
- at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
- at scala.collection.AbstractIterator.to(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
- at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
- at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:242)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
- at java.lang.Thread.run(Thread.java:745)
- Driver stacktrace:
- at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1457)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1445)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1444)
- at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
- at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
- at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1444)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
- at scala.Option.foreach(Option.scala:236)
- at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)
- at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1668)
- at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1627)
- at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1616)
- at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
- at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)
- at org.apache.spark.SparkContext.runJob(SparkContext.scala:1862)
- at org.apache.spark.SparkContext.runJob(SparkContext.scala:1875)
- at org.apache.spark.SparkContext.runJob(SparkContext.scala:1888)
- at org.apache.spark.SparkContext.runJob(SparkContext.scala:1959)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:927)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
- at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
- at org.apache.spark.rdd.RDD.collect(RDD.scala:926)
- at org.apache.spark.rdd.PairRDDFunctions$$anonfun$collectAsMap$1.apply(PairRDDFunctions.scala:741)
- at org.apache.spark.rdd.PairRDDFunctions$$anonfun$collectAsMap$1.apply(PairRDDFunctions.scala:740)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
- at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
- at org.apache.spark.rdd.PairRDDFunctions.collectAsMap(PairRDDFunctions.scala:740)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$.MovieModel(<console>:142)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$.main1(<console>:115)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:106)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:111)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:113)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:115)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:117)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:119)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:121)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:123)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:125)
- at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:127)
- at $iwC$$iwC$$iwC$$iwC.<init>(<console>:129)
- at $iwC$$iwC$$iwC.<init>(<console>:131)
- at $iwC$$iwC.<init>(<console>:133)
- at $iwC.<init>(<console>:135)
- at <init>(<console>:137)
- at .<init>(<console>:141)
- at .<clinit>(<console>)
- at .<init>(<console>:7)
- at .<clinit>(<console>)
- at $print(<console>)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
- at java.lang.reflect.Method.invoke(Method.java:606)
- at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1045)
- at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1326)
- at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:821)
- at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:852)
- at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:800)
- at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
- at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
- at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
- at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
- at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
- at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
- at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
- at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
- at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1064)
- at org.apache.spark.repl.Main$.main(Main.scala:35)
- at org.apache.spark.repl.Main.main(Main.scala)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
- at java.lang.reflect.Method.invoke(Method.java:606)
- at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:730)
- at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
- at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
- at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
- at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
- Caused by: scala.MatchError: [Ljava.lang.String;@2429eb1 (of class [Ljava.lang.String;)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$RecommendMovie$$anonfun$BuildMovies$1.apply(<console>:123)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
- at scala.collection.Iterator$class.foreach(Iterator.scala:727)
- at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
- at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
- at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
- at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
- at scala.collection.AbstractIterator.to(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
- at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
- at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
- at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.rdd.RDD$$anonfun$collect$1$$anonfun$12.apply(RDD.scala:927)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1888)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:242)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
- at java.lang.Thread.run(Thread.java:745)
Add Comment
Please, Sign In to add comment