IGNITE-11037: [ML] Add parser for Spark KMeans clustering model
author: zaleslaw <zaleslaw.sin@gmail.com>
Wed, 30 Jan 2019 13:46:01 +0000 (16:46 +0300)
committer: Yury Babak <ybabak@gridgain.com>
Wed, 30 Jan 2019 13:46:01 +0000 (16:46 +0300)
This closes #5970

examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/KMeansFromSparkExample.java [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/data/._SUCCESS.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/data/.part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/data/_SUCCESS [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/data/part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/metadata/._SUCCESS.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/metadata/.part-00000.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/metadata/_SUCCESS [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/kmeans/metadata/part-00000 [new file with mode: 0644]
modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SparkModelParser.java
modules/ml/spark-model-parser/src/main/java/org/apache/ignite/ml/sparkmodelparser/SupportedSparkModels.java

diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/KMeansFromSparkExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/spark/modelparser/KMeansFromSparkExample.java
new file mode 100644 (file)
index 0000000..693c3d7
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.inference.spark.modelparser;
+
+import java.io.FileNotFoundException;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.examples.ml.tutorial.TitanicUtils;
+import org.apache.ignite.ml.clustering.kmeans.KMeansModel;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.sparkmodelparser.SparkModelParser;
+import org.apache.ignite.ml.sparkmodelparser.SupportedSparkModels;
+
+/**
+ * Run KMeans model loaded from snappy.parquet file.
+ * The snappy.parquet file was generated by Spark MLLib model.write.overwrite().save(..) operator.
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
+ */
+public class KMeansFromSparkExample {
+    /** Path to Spark KMeans model. */
+    public static final String SPARK_MDL_PATH = "examples/src/main/resources/models/spark/serialized/kmeans/data" +
+        "/part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet";
+
+    /** Run example. */
+    public static void main(String[] args) throws FileNotFoundException {
+        System.out.println();
+        System.out.println(">>> K-means model loaded from Spark through serialization over partitioned dataset usage example started.");
+        // Start ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+
+            IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
+
+            // Builds a 4-dimensional feature vector from row columns 0, 5, 6 and 4
+            // (NOTE(review): presumably pclass/sibsp/parch/age — confirm against the TitanicUtils schema).
+            // NaN values are replaced with 0 so the distance computation stays finite.
+            IgniteBiFunction<Integer, Object[], Vector> featureExtractor = (k, v) -> {
+                double[] data = new double[] {(double)v[0], (double)v[5], (double)v[6], (double)v[4]};
+                data[0] = Double.isNaN(data[0]) ? 0 : data[0];
+                data[1] = Double.isNaN(data[1]) ? 0 : data[1];
+                data[2] = Double.isNaN(data[2]) ? 0 : data[2];
+                data[3] = Double.isNaN(data[3]) ? 0 : data[3];
+                return VectorUtils.of(data);
+            };
+
+            // Label: column 1 — presumably the "survived" flag; verify against the dataset schema.
+            IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double)v[1];
+
+            // Parse the Spark-serialized parquet file into an Ignite KMeansModel.
+            KMeansModel mdl = (KMeansModel)SparkModelParser.parse(
+                SPARK_MDL_PATH,
+                SupportedSparkModels.KMEANS
+            );
+
+            System.out.println(">>> K-Means model: " + mdl);
+            System.out.println(">>> ------------------------------------");
+            System.out.println(">>> | Predicted cluster\t| Is survived\t|");
+            System.out.println(">>> ------------------------------------");
+
+            // Score every cached passenger against the loaded model and print the assigned cluster.
+            try (QueryCursor<Cache.Entry<Integer, Object[]>> observations = dataCache.query(new ScanQuery<>())) {
+                for (Cache.Entry<Integer, Object[]> observation : observations) {
+                    Vector inputs = featureExtractor.apply(observation.getKey(), observation.getValue());
+                    double isSurvived = lbExtractor.apply(observation.getKey(), observation.getValue());
+                    double clusterId = mdl.predict(inputs);
+
+                    System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", clusterId, isSurvived);
+                }
+            }
+
+            System.out.println(">>> ---------------------------------");
+        }
+    }
+}
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/data/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/kmeans/data/._SUCCESS.crc
new file mode 100644 (file)
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/kmeans/data/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/data/.part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet.crc b/examples/src/main/resources/models/spark/serialized/kmeans/data/.part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet.crc
new file mode 100644 (file)
index 0000000..b1f0cfc
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/kmeans/data/.part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/data/_SUCCESS b/examples/src/main/resources/models/spark/serialized/kmeans/data/_SUCCESS
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/data/part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet b/examples/src/main/resources/models/spark/serialized/kmeans/data/part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet
new file mode 100644 (file)
index 0000000..add4a24
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/kmeans/data/part-00000-e1f2c475-c65a-4b9e-879e-de4afd4f65bc-c000.snappy.parquet differ
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/metadata/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/kmeans/metadata/._SUCCESS.crc
new file mode 100644 (file)
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/kmeans/metadata/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/metadata/.part-00000.crc b/examples/src/main/resources/models/spark/serialized/kmeans/metadata/.part-00000.crc
new file mode 100644 (file)
index 0000000..afb8fbf
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/kmeans/metadata/.part-00000.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/metadata/_SUCCESS b/examples/src/main/resources/models/spark/serialized/kmeans/metadata/_SUCCESS
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/kmeans/metadata/part-00000 b/examples/src/main/resources/models/spark/serialized/kmeans/metadata/part-00000
new file mode 100644 (file)
index 0000000..63a240e
--- /dev/null
@@ -0,0 +1 @@
+{"class":"org.apache.spark.ml.clustering.KMeansModel","timestamp":1548245307996,"sparkVersion":"2.2.0","uid":"kmeans_44026ddcb381","paramMap":{"maxIter":20,"predictionCol":"cluster","seed":1,"k":2,"tol":1.0E-4,"initSteps":2,"initMode":"k-means||","featuresCol":"features"}}
index 31fbed4..9e8a28c 100644 (file)
@@ -29,11 +29,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.ignite.internal.util.IgniteUtils;
 import org.apache.ignite.ml.IgniteModel;
+import org.apache.ignite.ml.clustering.kmeans.KMeansModel;
 import org.apache.ignite.ml.composition.ModelsComposition;
 import org.apache.ignite.ml.composition.boosting.GDBTrainer;
 import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator;
 import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator;
 import org.apache.ignite.ml.inference.Model;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
 import org.apache.ignite.ml.math.functions.IgniteFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
@@ -83,11 +85,53 @@ public class SparkModelParser {
                 return loadDecisionTreeModel(ignitePathToMdl);
             case RANDOM_FOREST:
                 return loadRandomForestModel(ignitePathToMdl);
+            case KMEANS:
+                return loadKMeansModel(ignitePathToMdl);
             default:
                 throw new UnsupportedSparkModelException(ignitePathToMdl);
         }
     }
 
+
+    private static Model loadKMeansModel(String pathToMdl) {
+        Vector[] centers = null;
+
+        try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
+            PageReadStore pages;
+            final MessageType schema = r.getFooter().getFileMetaData().getSchema();
+            final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
+
+            while (null != (pages = r.readNextRowGroup())) {
+                final int rows = (int)pages.getRowCount();
+                final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
+                centers = new DenseVector[rows];
+
+                for (int i = 0; i < rows; i++) {
+                    final SimpleGroup g = (SimpleGroup)recordReader.read();
+                    // final int clusterIdx = g.getInteger(0, 0);
+
+                    Group clusterCenterCoeff = g.getGroup(1, 0).getGroup(3, 0);
+
+                    final int amountOfCoefficients = clusterCenterCoeff.getFieldRepetitionCount(0);
+
+                    centers[i] = new DenseVector(amountOfCoefficients);
+
+                    for (int j = 0; j < amountOfCoefficients; j++) {
+                        double coefficient = clusterCenterCoeff.getGroup(0, j).getDouble(0, 0);
+                        centers[i].set(j, coefficient);
+                    }
+                }
+            }
+
+        }
+        catch (IOException e) {
+            System.out.println("Error reading parquet file.");
+            e.printStackTrace();
+        }
+
+        return new KMeansModel(centers, new EuclideanDistance());
+    }
+
     /**
      * Load model and its metadata from parquet files.
      *
index 9d9a9e9..f5ee3a6 100644 (file)
@@ -38,6 +38,9 @@ public enum SupportedSparkModels {
     /** Random forest. */
     RANDOM_FOREST,
 
+    /** K-Means. */
+    KMEANS,
+
     /**
      * Gradient boosted trees.
      * NOTE: support binary classification only with raw labels 0 and 1