IGNITE-10866: [ML] Add an example of LogRegression model loading
author zaleslaw <zaleslaw.sin@gmail.com>
Wed, 16 Jan 2019 13:17:48 +0000 (16:17 +0300)
committer Yury Babak <ybabak@gridgain.com>
Wed, 16 Jan 2019 13:17:48 +0000 (16:17 +0300)
This closes #5800

examples/pom.xml
examples/src/main/java/org/apache/ignite/examples/ml/inference/LogRegFromSparkThroughSerializationExample.java [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/data/._SUCCESS.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/data/.part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/data/_SUCCESS [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/data/part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/metadata/._SUCCESS.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/metadata/.part-00000.crc [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/metadata/_SUCCESS [new file with mode: 0644]
examples/src/main/resources/models/spark/serialized/metadata/part-00000 [new file with mode: 0644]

index 252c972..285a0d9 100644 (file)
             <version>2.7.3</version>
         </dependency>
 
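+        <!-- Parquet and Hadoop dependencies used by LogRegFromSparkThroughSerializationExample
+             to parse the Spark-serialized model. -->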
+        <dependency>
+            <groupId>org.apache.parquet</groupId>
+            <artifactId>parquet-hadoop</artifactId>
+            <version>1.9.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>2.7.0</version>
+        </dependency>
+
     </dependencies>
 
     <properties>
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/LogRegFromSparkThroughSerializationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/LogRegFromSparkThroughSerializationExample.java
new file mode 100644 (file)
index 0000000..15917b6
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.inference;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.examples.ml.tutorial.TitanicUtils;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.ml.regressions.logistic.LogisticRegressionModel;
+import org.apache.ignite.ml.selection.scoring.evaluator.BinaryClassificationEvaluator;
+import org.apache.ignite.ml.selection.scoring.metric.Accuracy;
+import org.apache.parquet.column.page.PageReadStore;
+import org.apache.parquet.example.data.Group;
+import org.apache.parquet.example.data.simple.SimpleGroup;
+import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
+import org.apache.parquet.format.converter.ParquetMetadataConverter;
+import org.apache.parquet.hadoop.ParquetFileReader;
+import org.apache.parquet.hadoop.metadata.ParquetMetadata;
+import org.apache.parquet.io.ColumnIOFactory;
+import org.apache.parquet.io.MessageColumnIO;
+import org.apache.parquet.io.RecordReader;
+import org.apache.parquet.schema.MessageType;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Runs a logistic regression model loaded from a snappy.parquet file.
+ * The file was produced by Spark MLlib's {@code model.write().overwrite().save(..)} call.
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
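+ * <p>
+ * On the Spark side, a file like this can be produced roughly as follows (a sketch; the
+ * {@code training} dataset and the Spark 2.2 Java API usage are assumptions, not part of this example):
+ * <pre>
+ * LogisticRegressionModel sparkMdl = new LogisticRegression().fit(training);
+ * sparkMdl.write().overwrite().save("examples/src/main/resources/models/spark/serialized");
+ * </pre></p>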
+ */
+public class LogRegFromSparkThroughSerializationExample {
+    /** Run example. */
+    public static void main(String[] args) throws FileNotFoundException {
+        System.out.println();
+        System.out.println(">>> Logistic regression model loaded from Spark via Parquet serialization over partitioned dataset usage example started.");
+        // Start Ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+
+            IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
+
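+            // Extract the same three numeric features (columns 0, 5 and 6 of the Titanic data)
+            // the Spark model was trained on, replacing NaN values with 0.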
+            IgniteBiFunction<Integer, Object[], Vector> featureExtractor = (k, v) -> {
+                double[] data = new double[] {(double)v[0], (double)v[5], (double)v[6]};
+                data[0] = Double.isNaN(data[0]) ? 0 : data[0];
+                data[1] = Double.isNaN(data[1]) ? 0 : data[1];
+                data[2] = Double.isNaN(data[2]) ? 0 : data[2];
+
+                return VectorUtils.of(data);
+            };
+
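+            // The label is the "survived" column (the saved model's labelCol, see metadata/part-00000).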
+            IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double)v[1];
+
+            LogisticRegressionModel mdl = SparkModelParser.load();
+
+            System.out.println(">>> Logistic regression model: " + mdl);
+
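+            // Score the imported model over the cached dataset.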
+            double accuracy = BinaryClassificationEvaluator.evaluate(
+                dataCache,
+                mdl,
+                featureExtractor,
+                lbExtractor,
+                new Accuracy<>()
+            );
+
+            System.out.println("\n>>> Accuracy: " + accuracy);
+            System.out.println("\n>>> Test error: " + (1 - accuracy));
+        }
+    }
+
+    /** Utility class that parses the LogReg model from the Spark Parquet output. */
+    private static class SparkModelParser {
+        /** Path to the Parquet file with the serialized model data. */
+        private static final Path parquetPath
+            = new Path("examples/src/main/resources/models/spark/serialized/data/part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet");
+
+        /**
+         * Load LogReg model from parquet file.
+         *
+         * @return Instance of LogReg model.
+         */
+        public static LogisticRegressionModel load() {
+            // Fallback values, returned as-is if the Parquet file cannot be parsed.
+            double[] rawCoefficients = {-0.7442986893142758, -0.09165692978346071, 0.24494383939344133};
+            Vector coefficients = new DenseVector(rawCoefficients);
+            double intercept = 1.1374435712657005;
+
+            Configuration conf = new Configuration();
+            try {
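+                // Read the footer to obtain the file schema, then scan the row groups record by record.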
+                ParquetMetadata readFooter = ParquetFileReader.readFooter(conf, parquetPath, ParquetMetadataConverter.NO_FILTER);
+                MessageType schema = readFooter.getFileMetaData().getSchema();
+
+                PageReadStore pages;
+                try (ParquetFileReader r = new ParquetFileReader(conf, parquetPath, readFooter)) {
+                    while (null != (pages = r.readNextRowGroup())) {
+                        final long rows = pages.getRowCount();
+                        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
+                        final RecordReader<Group> recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
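+                        // The model is serialized as a single record whose fields hold
+                        // the intercept vector and the coefficient matrix.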
+                        for (int i = 0; i < rows; i++) {
+                            final SimpleGroup g = (SimpleGroup)recordReader.read();
+                            intercept = readIntercept(g);
+                            coefficients = readCoefficients(g);
+                        }
+                    }
+                }
+            }
+            catch (IOException e) {
+                System.out.println("Error reading parquet file.");
+                e.printStackTrace();
+            }
+
+            return new LogisticRegressionModel(coefficients, intercept);
+        }
+
+        /**
+         * Read the intercept value from parquet.
+         *
+         * @param g Model data group.
+         * @return Intercept value.
+         */
+        private static double readIntercept(SimpleGroup g) {
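+            // Layout observed for this particular Spark 2.2 file (other versions may differ):
+            // field 2 is the interceptVector struct, its field 3 is the dense values array,
+            // and the intercept is that array's single element.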
+            final SimpleGroup interceptVector = (SimpleGroup)g.getGroup(2, 0);
+            final SimpleGroup interceptVectorVal = (SimpleGroup)interceptVector.getGroup(3, 0);
+            final SimpleGroup interceptVectorValElement = (SimpleGroup)interceptVectorVal.getGroup(0, 0);
+            return interceptVectorValElement.getDouble(0, 0);
+        }
+
+        /**
+         * Read coefficient matrix from parquet.
+         *
+         * @param g Model data group.
+         * @return Vector of coefficients.
+         */
+        @NotNull private static Vector readCoefficients(SimpleGroup g) {
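+            // Field 3 is Spark's coefficientMatrix struct; its field 5 is the flat values array
+            // (dense layout; positions observed for this particular file).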
+            final int coefficientsCnt = g.getGroup(3, 0).getGroup(5, 0).getFieldRepetitionCount(0);
+            final Vector coefficients = new DenseVector(coefficientsCnt);
+
+            for (int j = 0; j < coefficientsCnt; j++) {
+                double coefficient = g.getGroup(3, 0).getGroup(5, 0).getGroup(0, j).getDouble(0, 0);
+                coefficients.set(j, coefficient);
+            }
+            return coefficients;
+        }
+
+        /**
+         * Load a mock model with the hardcoded coefficients [-0.7442986893142758, -0.09165692978346071,
+         * 0.24494383939344133] and intercept 1.1374435712657005 produced by the Spark training run.
+         *
+         * @return LogReg model.
+         */
+        public static LogisticRegressionModel loadMock() {
+            double[] rawCoefficients = {-0.7442986893142758, -0.09165692978346071, 0.24494383939344133};
+            Vector coefficients = new DenseVector(rawCoefficients);
+            double intercept = 1.1374435712657005;
+            return new LogisticRegressionModel(coefficients, intercept);
+        }
+    }
+}
diff --git a/examples/src/main/resources/models/spark/serialized/data/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/data/._SUCCESS.crc
new file mode 100644 (file)
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/data/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/data/.part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet.crc b/examples/src/main/resources/models/spark/serialized/data/.part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet.crc
new file mode 100644 (file)
index 0000000..7601d9c
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/data/.part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/data/_SUCCESS b/examples/src/main/resources/models/spark/serialized/data/_SUCCESS
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/data/part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet b/examples/src/main/resources/models/spark/serialized/data/part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet
new file mode 100644 (file)
index 0000000..1bbba95
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/data/part-00000-7551081d-c0a8-4ed7-afe4-a464aabc7f80-c000.snappy.parquet differ
diff --git a/examples/src/main/resources/models/spark/serialized/metadata/._SUCCESS.crc b/examples/src/main/resources/models/spark/serialized/metadata/._SUCCESS.crc
new file mode 100644 (file)
index 0000000..3b7b044
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/metadata/._SUCCESS.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/metadata/.part-00000.crc b/examples/src/main/resources/models/spark/serialized/metadata/.part-00000.crc
new file mode 100644 (file)
index 0000000..c309e56
Binary files /dev/null and b/examples/src/main/resources/models/spark/serialized/metadata/.part-00000.crc differ
diff --git a/examples/src/main/resources/models/spark/serialized/metadata/_SUCCESS b/examples/src/main/resources/models/spark/serialized/metadata/_SUCCESS
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/examples/src/main/resources/models/spark/serialized/metadata/part-00000 b/examples/src/main/resources/models/spark/serialized/metadata/part-00000
new file mode 100644 (file)
index 0000000..88874bd
--- /dev/null
@@ -0,0 +1 @@
+{"class":"org.apache.spark.ml.classification.LogisticRegressionModel","timestamp":1547130196932,"sparkVersion":"2.2.0","uid":"logreg_e778e19160e0","paramMap":{"aggregationDepth":2,"standardization":true,"rawPredictionCol":"rawPrediction","predictionCol":"prediction","probabilityCol":"probability","family":"auto","featuresCol":"features","elasticNetParam":0.1,"labelCol":"survived","threshold":0.5,"tol":1.0E-6,"fitIntercept":true,"regParam":0.01,"maxIter":100}}