switched examples to new predict method and raised docker pio tag version
Chris Wewerka committed Nov 7, 2018
commit 183c9a79b1428868d217ded1bdd70601b98f4ef4
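
In short, every template's predict method moves from a synchronous signature returning PredictedResult to an asynchronous one that takes an implicit ExecutionContext and returns a Future[PredictedResult]. A minimal, self-contained sketch of the new calling convention — Query, PredictedResult, and DemoAlgorithm below are simplified stand-ins, not the templates' actual classes:

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._

// Simplified stand-ins for a template's engine types.
case class Query(attr0: Double, attr1: Double, attr2: Double)
case class PredictedResult(label: Double)

class DemoAlgorithm {
  // New-style predict: implicit ExecutionContext, Future result.
  def predict(query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] =
    Future.successful(PredictedResult(0.0)) // result is already computed
}

object DemoApp extends App {
  import scala.concurrent.ExecutionContext.Implicits.global
  // Blocking here only for demonstration; a server would compose on the Future.
  val result = Await.result(new DemoAlgorithm().predict(Query(1.0, 0.0, 0.0)), 5.seconds)
  println(result) // PredictedResult(0.0)
}
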
2 changes: 1 addition & 1 deletion docker/pio/Dockerfile
@@ -18,7 +18,7 @@
FROM openjdk:8

ARG PIO_GIT_URL=https://github.com/apache/predictionio.git
-ARG PIO_TAG=v0.13.0
+ARG PIO_TAG=v0.14.0
ENV SCALA_VERSION=2.11.12
ENV SPARK_VERSION=2.2.2
ENV HADOOP_VERSION=2.7.7
@@ -20,5 +20,5 @@ name := "template-scala-parallel-classification"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -19,14 +19,14 @@ package org.apache.predictionio.examples.classification

import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params

import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.classification.NaiveBayesModel
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.SparkContext

import grizzled.slf4j.Logger

+import scala.concurrent.{ExecutionContext, Future}

case class AlgorithmParams(
lambda: Double
) extends Params
@@ -47,11 +47,11 @@ class NaiveBayesAlgorithm(val ap: AlgorithmParams)
NaiveBayes.train(data.labeledPoints, ap.lambda)
}

-  def predict(model: NaiveBayesModel, query: Query): PredictedResult = {
+  def predict(model: NaiveBayesModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {
val label = model.predict(Vectors.dense(
Array(query.attr0, query.attr1, query.attr2)
))
-    PredictedResult(label)
+    Future.successful(PredictedResult(label))
}

}
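
Note that the converted bodies use Future.successful rather than Future { ... }: the Naive Bayes lookup is cheap and already computed by the time predict runs, so wrapping it in an already-completed Future avoids scheduling any task on the ExecutionContext. A small sketch of the distinction (helper names here are illustrative only):

import scala.concurrent.{ExecutionContext, Future}

// Already-known value: completes immediately, nothing submitted to ec.
def cheap(label: Double): Future[Double] =
  Future.successful(label)

// Potentially expensive work: the body runs on the ExecutionContext.
def expensive(compute: () => Double)(implicit ec: ExecutionContext): Future[Double] =
  Future(compute())
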
@@ -20,5 +20,5 @@ name := "template-scala-parallel-classification"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -19,14 +19,14 @@ package org.apache.predictionio.examples.classification

import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params

import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.classification.NaiveBayesModel
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.SparkContext

import grizzled.slf4j.Logger

+import scala.concurrent.{ExecutionContext, Future}

case class AlgorithmParams(
lambda: Double
) extends Params
@@ -47,12 +47,12 @@ class NaiveBayesAlgorithm(val ap: AlgorithmParams)
NaiveBayes.train(data.labeledPoints, ap.lambda)
}

-  def predict(model: NaiveBayesModel, query: Query): PredictedResult = {
+  def predict(model: NaiveBayesModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {
val label = model.predict(Vectors.dense(
// MODIFIED
Array(query.featureA, query.featureB, query.featureC, query.featureD)
))
-    PredictedResult(label)
+    Future.successful(PredictedResult(label))
}

}
@@ -20,5 +20,5 @@ name := "template-scala-parallel-ecommercerecommendation"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -22,16 +22,15 @@ import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap
import org.apache.predictionio.data.storage.Event
import org.apache.predictionio.data.store.LEventStore

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import org.apache.spark.rdd.RDD

import grizzled.slf4j.Logger

import scala.collection.mutable.PriorityQueue
+import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global

@@ -239,7 +238,7 @@ class ECommAlgorithm(val ap: ECommAlgorithmParams)
buyCountsRDD.collectAsMap.toMap
}

-  def predict(model: ECommModel, query: Query): PredictedResult = {
+  def predict(model: ECommModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {

val userFeatures = model.userFeatures
val productModels = model.productModels
@@ -322,7 +321,7 @@ class ECommAlgorithm(val ap: ECommAlgorithmParams)
)
}

-    new PredictedResult(itemScores)
+    Future.successful(new PredictedResult(itemScores))
}

/** Generate final blackList based on other constraints */
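
ECommAlgorithm is the one template here that also performs blocking LEventStore lookups at query time (hence the existing ExecutionContext.Implicits.global import), so the new implicit ec opens the option of moving that blocking work off the serving thread. A sketch under assumed names — blockingLookup is a hypothetical stand-in, not an LEventStore API:

import scala.concurrent.{ExecutionContext, Future}

// Hypothetical stand-in for a blocking event-store query.
def blockingLookup(user: String): Seq[String] = Seq("i1", "i2")

// Run the blocking call on the supplied ExecutionContext instead of
// the caller's thread, then keep composing asynchronously.
def recentItems(user: String)(implicit ec: ExecutionContext): Future[Seq[String]] =
  Future(blockingLookup(user))
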
@@ -20,5 +20,5 @@ name := "template-scala-parallel-ecommercerecommendation"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -22,16 +22,15 @@ import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap
import org.apache.predictionio.data.storage.Event
import org.apache.predictionio.data.store.LEventStore

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import org.apache.spark.rdd.RDD

import grizzled.slf4j.Logger

import scala.collection.mutable.PriorityQueue
+import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global

@@ -240,7 +239,7 @@ class ECommAlgorithm(val ap: ECommAlgorithmParams)
buyCountsRDD.collectAsMap.toMap
}

-  def predict(model: ECommModel, query: Query): PredictedResult = {
+  def predict(model: ECommModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {

val userFeatures = model.userFeatures
val productModels = model.productModels
@@ -311,7 +310,7 @@ class ECommAlgorithm(val ap: ECommAlgorithmParams)
)
}

-    new PredictedResult(itemScores)
+    Future.successful(new PredictedResult(itemScores))
}

/** Generate final blackList based on other constraints */
@@ -20,5 +20,5 @@ name := "template-scala-parallel-recommendation"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -20,16 +20,16 @@ package org.apache.predictionio.examples.recommendation
import org.apache.predictionio.controller.PAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import org.apache.spark.mllib.recommendation.ALSModel

import grizzled.slf4j.Logger

+import scala.concurrent.{ExecutionContext, Future}

case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
@@ -92,7 +92,7 @@ class ALSAlgorithm(val ap: ALSAlgorithmParams)
itemStringIntMap = itemStringIntMap)
}

-  def predict(model: ALSModel, query: Query): PredictedResult = {
+  def predict(model: ALSModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {
// Convert String ID to Int index for Mllib
model.userStringIntMap.get(query.user).map { userInt =>
// create inverse view of itemStringIntMap
@@ -103,10 +103,10 @@
val itemScores = model
.recommendProductsWithFilter(userInt, query.num, blackList) // MODIFIED
.map (r => ItemScore(itemIntStringMap(r.product), r.rating))
-      PredictedResult(itemScores)
+      Future.successful(PredictedResult(itemScores))
}.getOrElse{
logger.info(s"No prediction for unknown user ${query.user}.")
-      PredictedResult(Array.empty)
+      Future.successful(PredictedResult(Array.empty))
}
}

@@ -20,5 +20,5 @@ name := "template-scala-parallel-recommendation"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -20,16 +20,16 @@ package org.apache.predictionio.examples.recommendation
import org.apache.predictionio.controller.PAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import org.apache.spark.mllib.recommendation.ALSModel

import grizzled.slf4j.Logger

+import scala.concurrent.{ExecutionContext, Future}

case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
@@ -92,7 +92,7 @@ class ALSAlgorithm(val ap: ALSAlgorithmParams)
itemStringIntMap = itemStringIntMap)
}

-  def predict(model: ALSModel, query: Query): PredictedResult = {
+  def predict(model: ALSModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {
// Convert String ID to Int index for Mllib
model.userStringIntMap.get(query.user).map { userInt =>
// create inverse view of itemStringIntMap
@@ -101,10 +101,10 @@ class ALSAlgorithm(val ap: ALSAlgorithmParams)
// index. Convert it to String ID for returning PredictedResult
val itemScores = model.recommendProducts(userInt, query.num)
.map (r => ItemScore(itemIntStringMap(r.product), r.rating))
-      PredictedResult(itemScores)
+      Future.successful(PredictedResult(itemScores))
}.getOrElse{
logger.info(s"No prediction for unknown user ${query.user}.")
-      PredictedResult(Array.empty)
+      Future.successful(PredictedResult(Array.empty))
}
}

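
On the consuming side, callers no longer get a plain PredictedResult back; they map or recover on the Future instead of blocking. A short consumption sketch — ItemScore, RecResult, and predictStub are illustrative stand-ins, not template types:

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

case class ItemScore(item: String, score: Double)
case class RecResult(itemScores: Array[ItemScore])

// Stand-in for the new Future-returning predict.
def predictStub(user: String): Future[RecResult] =
  Future.successful(RecResult(Array(ItemScore("i1", 4.5))))

// Non-blocking: transform the result and fall back on failure.
val topItem: Future[Option[String]] =
  predictStub("u1")
    .map(_.itemScores.headOption.map(_.item))
    .recover { case _: Exception => None }
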
@@ -20,5 +20,5 @@ name := "template-scala-parallel-recommendation"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -20,16 +20,16 @@ package org.apache.predictionio.examples.recommendation
import org.apache.predictionio.controller.PAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import org.apache.spark.mllib.recommendation.ALSModel

import grizzled.slf4j.Logger

+import scala.concurrent.{ExecutionContext, Future}

case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
@@ -92,7 +92,7 @@ class ALSAlgorithm(val ap: ALSAlgorithmParams)
itemStringIntMap = itemStringIntMap)
}

-  def predict(model: ALSModel, query: Query): PredictedResult = {
+  def predict(model: ALSModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {
// Convert String ID to Int index for Mllib
model.userStringIntMap.get(query.user).map { userInt =>
// create inverse view of itemStringIntMap
@@ -101,10 +101,10 @@ class ALSAlgorithm(val ap: ALSAlgorithmParams)
// index. Convert it to String ID for returning PredictedResult
val itemScores = model.recommendProducts(userInt, query.num)
.map (r => ItemScore(itemIntStringMap(r.product), r.rating))
-      PredictedResult(itemScores)
+      Future.successful(PredictedResult(itemScores))
}.getOrElse{
logger.info(s"No prediction for unknown user ${query.user}.")
-      PredictedResult(Array.empty)
+      Future.successful(PredictedResult(Array.empty))
}
}

@@ -20,5 +20,5 @@ name := "template-scala-parallel-recommendation"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")
@@ -20,16 +20,16 @@ package org.apache.predictionio.examples.recommendation
import org.apache.predictionio.controller.PAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import org.apache.spark.mllib.recommendation.ALSModel

import grizzled.slf4j.Logger

+import scala.concurrent.{ExecutionContext, Future}

case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
@@ -92,7 +92,7 @@ class ALSAlgorithm(val ap: ALSAlgorithmParams)
itemStringIntMap = itemStringIntMap)
}

-  def predict(model: ALSModel, query: Query): PredictedResult = {
+  def predict(model: ALSModel, query: Query)(implicit ec: ExecutionContext): Future[PredictedResult] = {
// Convert String ID to Int index for Mllib
model.userStringIntMap.get(query.user).map { userInt =>
// create inverse view of itemStringIntMap
@@ -101,10 +101,10 @@ class ALSAlgorithm(val ap: ALSAlgorithmParams)
// index. Convert it to String ID for returning PredictedResult
val itemScores = model.recommendProducts(userInt, query.num)
.map (r => ItemScore(itemIntStringMap(r.product), r.rating))
-      PredictedResult(itemScores)
+      Future.successful(PredictedResult(itemScores))
}.getOrElse{
logger.info(s"No prediction for unknown user ${query.user}.")
-      PredictedResult(Array.empty)
+      Future.successful(PredictedResult(Array.empty))
}
}

@@ -20,5 +20,5 @@ name := "template-scala-parallel-recommendation"
organization := "org.apache.predictionio"
scalaVersion := "2.11.8"
libraryDependencies ++= Seq(
-  "org.apache.predictionio" %% "apache-predictionio-core" % "0.13.0" % "provided",
+  "org.apache.predictionio" %% "apache-predictionio-core" % "0.14.0-SNAPSHOT" % "provided",
"org.apache.spark" %% "spark-mllib" % "2.1.1" % "provided")