/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples

import java.util.Random

import scala.math.exp

import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler.InputFormatInfo
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Vector

/**
 * Logistic regression based classification.
 * This example reads its input from HDFS and uses Tachyon (off-heap storage)
 * to persist RDDs during the computation.
 */
object SparkTachyonHdfsLR {
  val D = 10   // Number of dimensions
  val rand = new Random(42)

  case class DataPoint(x: Vector, y: Double)

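  // Parse a line of the form "<label> <f1> <f2> ... <fD>" (space-separated
  // doubles, label first) into a DataPoint.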
  def parsePoint(line: String): DataPoint = {
    val tok = new java.util.StringTokenizer(line, " ")
    val y = tok.nextToken.toDouble
    val x = new Array[Double](D)
    var i = 0
    while (i < D) {
      x(i) = tok.nextToken.toDouble
      i += 1
    }
    DataPoint(new Vector(x), y)
  }

  def main(args: Array[String]) {
    if (args.length < 3) {
      System.err.println("Usage: SparkTachyonHdfsLR <master> <file> <iters>")
      System.exit(1)
    }
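    // Compute the HDFS block locations of the input file and pass them to the
    // SparkContext as preferred locations, so work is scheduled near the data.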
    val inputPath = args(1)
    val conf = SparkHadoopUtil.get.newConfiguration()
    val sc = new SparkContext(args(0), "SparkTachyonHdfsLR",
      System.getenv("SPARK_HOME"), SparkContext.jarOfClass(this.getClass), Map(),
      InputFormatInfo.computePreferredLocations(
        Seq(new InputFormatInfo(conf, classOf[org.apache.hadoop.mapred.TextInputFormat], inputPath))
      ))
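    // Cache the parsed points off-heap (backed by Tachyon) so that each
    // iteration of gradient descent reuses them without re-reading HDFS.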
    val lines = sc.textFile(inputPath)
    val points = lines.map(parsePoint _).persist(StorageLevel.OFF_HEAP)
    val ITERATIONS = args(2).toInt

    // Initialize w to a random value
    var w = Vector(D, _ => 2 * rand.nextDouble - 1)
    println("Initial w: " + w)

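    // Batch gradient descent on the logistic loss: each iteration evaluates
    // the gradient over all cached points and moves w by a fixed unit step.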
    for (i <- 1 to ITERATIONS) {
      println("On iteration " + i)
      val gradient = points.map { p =>
        (1 / (1 + exp(-p.y * (w dot p.x))) - 1) * p.y * p.x
      }.reduce(_ + _)
      w -= gradient
    }

    println("Final w: " + w)
    System.exit(0)
  }
}