HBASE-21430 [hbase-connectors] Move hbase-spark* modules to hbase-connectors repo
author     Michael Stack <stack@apache.org>
           Fri, 2 Nov 2018 22:27:59 +0000 (15:27 -0700)
committer  Michael Stack <stack@apache.org>
           Fri, 2 Nov 2018 22:27:59 +0000 (15:27 -0700)
78 files changed:
README.md
kafka/hbase-kafka-proxy/pom.xml
kafka/pom.xml
pom.xml
spark/README.md [new file with mode: 0755]
spark/hbase-spark-it/pom.xml [new file with mode: 0644]
spark/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java [new file with mode: 0644]
spark/hbase-spark-it/src/test/resources/hbase-site.xml [new file with mode: 0644]
spark/hbase-spark/README.txt [new file with mode: 0644]
spark/hbase-spark/pom.xml [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java [new file with mode: 0644]
spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java [new file with mode: 0644]
spark/hbase-spark/src/main/protobuf/SparkFilter.proto [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/BulkLoadPartitioner.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayWrapper.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ColumnFamilyQualifierMapKeyWrapper.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpression.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/FamiliesQualifiersValues.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/FamilyHFileWriteOptions.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseDStreamFunctions.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseRDDFunctions.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/KeyFamilyQualifier.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/NewHBaseRDD.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/Bound.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/DataTypeParserWrapper.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseResources.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableCatalog.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/JavaBytesEncoder.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/NaiveEncoder.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SchemaConverters.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerializableConfiguration.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/Utils.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala [new file with mode: 0644]
spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala [new file with mode: 0644]
spark/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java [new file with mode: 0644]
spark/hbase-spark/src/test/resources/hbase-site.xml [new file with mode: 0644]
spark/hbase-spark/src/test/resources/log4j.properties [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/BulkLoadSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/DefaultSourceSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpressionSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseCatalogSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCacheSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseContextSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseDStreamFunctionsSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseRDDFunctionsSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseTestSource.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/PartitionFilterSuite.scala [new file with mode: 0644]
spark/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala [new file with mode: 0644]
spark/pom.xml [new file with mode: 0644]

index c2b2071..b0159f9 100644 (file)
--- a/README.md
+++ b/README.md
@@ -3,3 +3,4 @@
 Connectors for [Apache HBase&trade;](https://hbase.apache.org) 
 
   * [Kafka Proxy](https://github.com/apache/hbase-connectors/tree/master/kafka)
+  * [Spark](https://github.com/apache/hbase-connectors/tree/master/spark)
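
The Spark connector linked above is built around the HBaseContext / JavaHBaseContext API whose sources are added later in this commit. Below is a minimal Java sketch of a distributed scan through that API, assuming the same JavaHBaseContext constructor and hbaseRDD(TableName, Scan) call used in IntegrationTestSparkBulkLoad further down; the table name "demo_table" and column family "cf" are placeholders, not part of the commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.spark.JavaHBaseContext;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public final class DistributedScanSketch {
  public static void main(String[] args) {
    // Local Spark context plus an HBase configuration, mirroring the setup
    // used by IntegrationTestSparkBulkLoad in this commit.
    SparkConf sparkConf = new SparkConf().setAppName("hbase-spark-scan").setMaster("local");
    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
    Configuration hbaseConf = HBaseConfiguration.create();
    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, hbaseConf);

    // Scan a (hypothetical) table through the Spark integration and count the rows.
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    long rows = hbaseContext.hbaseRDD(TableName.valueOf("demo_table"), scan).count();
    System.out.println("rows scanned: " + rows);

    jsc.close();
  }
}
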
index 642f139..9a2fa13 100755 (executable)
  <description>Proxy that forwards HBase replication events to a Kafka broker</description>
   <properties>
     <collections.version>4.1</collections.version>
-    <commons-lang3.version>3.6</commons-lang3.version>
-    <commons-io.version>2.5</commons-io.version>
     <kafka-clients.version>2.0.0</kafka-clients.version>
-    <commons-io.version>2.5</commons-io.version>
   </properties>
   <build>
     <plugins>
     <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-lang3</artifactId>
-      <version>${commons-lang3.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
     <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
-      <version>${commons-io.version}</version>
     </dependency>
   </dependencies>
 
index c489122..5c4df9e 100644 (file)
     <module>hbase-kafka-model</module>
     <module>hbase-kafka-proxy</module>
   </modules>
-  <properties>
-    <avro.version>1.7.7</avro.version>
-  </properties>
+  <properties />
   <dependencyManagement>
     <dependencies>
       <dependency>
-        <groupId>org.apache.avro</groupId>
-        <artifactId>avro</artifactId>
-        <version>${avro.version}</version>
-      </dependency>
-      <dependency>
         <groupId>org.apache.hbase.connectors.kafka</groupId>
         <artifactId>hbase-kafka-model</artifactId>
         <version>${project.version}</version>
diff --git a/pom.xml b/pom.xml
index 0a6b39c..3505d44 100755 (executable)
--- a/pom.xml
+++ b/pom.xml
@@ -47,6 +47,7 @@
   </licenses>
   <modules>
     <module>kafka</module>
+    <module>spark</module>
     <module>hbase-connectors-assembly</module>
   </modules>
   <scm>
   <properties>
     <!-- See https://maven.apache.org/maven-ci-friendly.html -->
     <revision>1.0.0-SNAPSHOT</revision>
+    <os.maven.version>1.6.1</os.maven.version>
     <maven.javadoc.skip>true</maven.javadoc.skip>
     <maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm</maven.build.timestamp.format>
     <buildDate>${maven.build.timestamp}</buildDate>
     <compileSource>1.8</compileSource>
     <java.min.version>${compileSource}</java.min.version>
     <maven.min.version>3.5.0</maven.min.version>
-    <hbase.version>2.1.0</hbase.version>
+    <hbase.version>3.0.0-SNAPSHOT</hbase.version>
     <maven.compiler.version>3.6.1</maven.compiler.version>
     <exec.maven.version>1.6.0</exec.maven.version>
     <audience-annotations.version>0.5.0</audience-annotations.version>
+    <avro.version>1.7.7</avro.version>
+    <junit.version>4.12</junit.version>
+    <commons-lang3.version>3.6</commons-lang3.version>
+    <slf4j.version>1.7.25</slf4j.version>
+    <commons-io.version>2.5</commons-io.version>
+    <checkstyle.version>8.11</checkstyle.version>
+    <maven.checkstyle.version>3.0.0</maven.checkstyle.version>
+    <external.protobuf.version>2.5.0</external.protobuf.version>
+    <servlet.api.version>3.1.0</servlet.api.version>
+    <!--Need profile for hadoop3. Need to do stuff like set netty
+         version in it... see how hbase/pom.xml does it.
+        <netty.hadoop.version>3.10.5.Final</netty.hadoop.version>
+        For now doing hadoop2 only.
+     -->
+    <hadoop-two.version>2.7.7</hadoop-two.version>
+    <hadoop.version>${hadoop-two.version}</hadoop.version>
+    <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
+    <!--The below compat.modules also needs to change-->
+    <compat.module>hbase-hadoop2-compat</compat.module>
   </properties>
   <dependencyManagement>
     <dependencies>
         <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-annotations</artifactId>
         <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-annotations</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-annotations</artifactId>
+        <version>${hbase.version}</version>
         <type>test-jar</type>
         <scope>test</scope>
       </dependency>
         <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-common</artifactId>
         <version>${hbase.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.code.findbugs</groupId>
+            <artifactId>jsr305</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-common</artifactId>
+        <version>${hbase.version}</version>
         <type>test-jar</type>
         <scope>test</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.code.findbugs</groupId>
+            <artifactId>jsr305</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
         <version>${hbase.version}</version>
         <scope>provided</scope>
       </dependency>
+      <dependency>
+        <artifactId>hbase-server</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <artifactId>hbase-client</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-protocol-shaded</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-protocol</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-testing-util</artifactId>
+        <version>${hbase.version}</version>
+        <scope>test</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.code.findbugs</groupId>
+            <artifactId>jsr305</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <artifactId>hbase-it</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <artifactId>hbase-mapreduce</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>hbase-mapreduce</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <artifactId>hbase-zookeeper</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>hbase-zookeeper</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-hadoop-compat</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-hadoop-compat</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>${compat.module}</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>${compat.module}</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.avro</groupId>
+        <artifactId>avro</artifactId>
+        <version>${avro.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>junit</groupId>
+        <artifactId>junit</artifactId>
+        <version>${junit.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.commons</groupId>
+        <artifactId>commons-lang3</artifactId>
+        <version>${commons-lang3.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-log4j12</artifactId>
+        <version>${slf4j.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-api</artifactId>
+        <version>${slf4j.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>commons-io</groupId>
+        <artifactId>commons-io</artifactId>
+        <version>${commons-io.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>com.google.protobuf</groupId>
+        <artifactId>protobuf-java</artifactId>
+        <version>${external.protobuf.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>javax.servlet</groupId>
+        <artifactId>javax.servlet-api</artifactId>
+        <version>${servlet.api.version}</version>
+      </dependency>
     </dependencies>
   </dependencyManagement>
   <build>
+    <extensions>
+      <extension>
+        <groupId>kr.motd.maven</groupId>
+        <artifactId>os-maven-plugin</artifactId>
+        <version>${os.maven.version}</version>
+      </extension>
+    </extensions>
     <pluginManagement>
       <plugins>
         <!-- See https://maven.apache.org/maven-ci-friendly.html-->
             <timestampPropertyName>build.year</timestampPropertyName>
           </configuration>
         </plugin>
+        <plugin>
+          <!-- Approach followed here is roughly the same as mentioned here:
+          https://maven.apache.org/plugins/maven-checkstyle-plugin/examples/multi-module-config.html
+          -->
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-checkstyle-plugin</artifactId>
+          <version>${maven.checkstyle.version}</version>
+          <dependencies>
+            <dependency>
+              <groupId>org.apache.hbase</groupId>
+              <artifactId>hbase-checkstyle</artifactId>
+              <version>${project.version}</version>
+            </dependency>
+            <dependency>
+              <groupId>com.puppycrawl.tools</groupId>
+              <artifactId>checkstyle</artifactId>
+              <version>${checkstyle.version}</version>
+            </dependency>
+          </dependencies>
+          <configuration>
+            <configLocation>hbase/checkstyle.xml</configLocation>
+            <suppressionsLocation>hbase/checkstyle-suppressions.xml</suppressionsLocation>
+            <includeTestSourceDirectory>true</includeTestSourceDirectory>
+          </configuration>
+        </plugin>
       </plugins>
     </pluginManagement>
     <plugins>
               </rules>
             </configuration>
           </execution>
+          <execution>
+            <id>banned-jsr305</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <bannedDependencies>
+                  <excludes>
+                    <exclude>com.google.code.findbugs:jsr305</exclude>
+                  </excludes>
+                  <message>We don't allow the JSR305 jar from the Findbugs project, see HBASE-16321.</message>
+                </bannedDependencies>
+              </rules>
+            </configuration>
+          </execution>
+          <execution>
+            <id>banned-scala</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <bannedDependencies>
+                  <excludes>
+                    <exclude>org.scala-lang:scala-library</exclude>
+                  </excludes>
+                  <message>We don't allow Scala outside of the hbase-spark module, see HBASE-13992.</message>
+                </bannedDependencies>
+              </rules>
+            </configuration>
+          </execution>
+          <execution>
+            <id>banned-hbase-spark</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <bannedDependencies>
+                  <excludes>
+                    <exclude>org.apache.hbase:hbase-spark</exclude>
+                  </excludes>
+                  <message>We don't allow other modules to depend on hbase-spark, see HBASE-13992.</message>
+                </bannedDependencies>
+              </rules>
+            </configuration>
+          </execution>
         </executions>
       </plugin>
     </plugins>
diff --git a/spark/README.md b/spark/README.md
new file mode 100755 (executable)
index 0000000..dcd11c7
--- /dev/null
@@ -0,0 +1 @@
+# Apache HBase&trade; Spark Connector
diff --git a/spark/hbase-spark-it/pom.xml b/spark/hbase-spark-it/pom.xml
new file mode 100644 (file)
index 0000000..7d7ffe7
--- /dev/null
@@ -0,0 +1,327 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hbase.connectors</groupId>
+    <artifactId>spark</artifactId>
+    <version>${revision}</version>
+    <relativePath>../</relativePath>
+  </parent>
+  <groupId>org.apache.hbase.connectors.spark</groupId>
+  <artifactId>hbase-spark-it</artifactId>
+  <name>Apache HBase - Spark Integration Tests</name>
+  <description>Integration and System tests for HBase</description>
+  <properties>
+    <spark.version>2.1.1</spark.version>
+    <!-- The following version is in sync with Spark's choice
+         Please take caution when this version is modified -->
+    <scala.version>2.11.8</scala.version>
+    <scala.binary.version>2.11</scala.binary.version>
+    <!-- Test inclusion patterns used by failsafe configuration -->
+    <unittest.include>**/Test*.java</unittest.include>
+    <integrationtest.include>**/IntegrationTest*.java</integrationtest.include>
+    <!-- To Run Tests with a particular Xmx Value use -Dfailsafe.Xmx=XXXg -->
+    <failsafe.Xmx>4g</failsafe.Xmx>
+    <!-- To run a single integration test, use -Dit.test=IntegrationTestXXX -->
+  </properties>
+  <build>
+    <pluginManagement>
+      <plugins>
+        <!-- Make a jar and put the sources in the jar -->
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-source-plugin</artifactId>
+        </plugin>
+        <plugin>
+          <!--Make it so assembly:single does nothing in here-->
+          <artifactId>maven-assembly-plugin</artifactId>
+          <configuration>
+            <skipAssembly>true</skipAssembly>
+          </configuration>
+        </plugin>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-failsafe-plugin</artifactId>
+          <version>${surefire.version}</version>
+          <dependencies>
+            <dependency>
+              <groupId>org.apache.maven.surefire</groupId>
+              <artifactId>surefire-junit4</artifactId>
+              <version>${surefire.version}</version>
+            </dependency>
+          </dependencies>
+          <configuration>
+            <includes>
+              <include>${integrationtest.include}</include>
+            </includes>
+            <excludes>
+              <exclude>${unittest.include}</exclude>
+              <exclude>**/*$*</exclude>
+            </excludes>
+            <redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
+            <failIfNoTests>false</failIfNoTests>
+            <testFailureIgnore>false</testFailureIgnore>
+          </configuration>
+          <executions>
+            <execution>
+              <id>integration-test</id>
+              <phase>integration-test</phase>
+              <goals>
+                <goal>integration-test</goal>
+              </goals>
+            </execution>
+            <execution>
+              <id>verify</id>
+              <phase>verify</phase>
+              <goals>
+                <goal>verify</goal>
+              </goals>
+            </execution>
+          </executions>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+
+    <plugins>
+      <!--  Run integration tests with mvn verify -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-failsafe-plugin</artifactId>
+        <configuration>
+          <skip>false</skip>
+          <forkMode>always</forkMode>
+          <!-- TODO: failsafe does timeout, but verify does not fail the build because of the timeout.
+               I believe it is a failsafe bug, we may consider using surefire -->
+          <forkedProcessTimeoutInSeconds>1800</forkedProcessTimeoutInSeconds>
+          <argLine>-enableassertions -Xmx${failsafe.Xmx}
+            -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled
+            -verbose:gc -XX:+PrintCommandLineFlags  -XX:+PrintFlagsFinal</argLine>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <executions>
+          <!-- hbase-spark is ok in this module -->
+          <execution>
+            <id>banned-hbase-spark</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+          <execution>
+            <id>banned-scala</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-mrapp-generated-classpath</id>
+            <phase>generate-test-resources</phase>
+            <goals>
+              <goal>build-classpath</goal>
+            </goals>
+            <configuration>
+              <!-- needed to run the unit test for DS to generate
+              the classpath that is required in the env
+              of the launch container in the mini cluster
+              -->
+              <outputFile>${project.build.directory}/test-classes/spark-generated-classpath</outputFile>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <configuration>
+          <failOnViolation>true</failOnViolation>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>net.revelc.code</groupId>
+        <artifactId>warbucks-maven-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <!-- Intra-project dependencies -->
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <type>jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <type>jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase.connectors.spark</groupId>
+      <artifactId>hbase-spark</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-it</artifactId>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>${compat.module}</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-testing-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+    <!-- Hadoop needs Netty 3.x at test scope for the minicluster -->
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty</artifactId>
+      <version>${netty.hadoop.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-core_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <!-- make sure wrong scala version is not pulled in -->
+          <groupId>org.scala-lang</groupId>
+          <artifactId>scala-library</artifactId>
+        </exclusion>
+        <exclusion>
+          <!-- make sure wrong scala version is not pulled in -->
+          <groupId>org.scala-lang</groupId>
+          <artifactId>scalap</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.code.findbugs</groupId>
+          <artifactId>jsr305</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.scala-lang.modules</groupId>
+      <artifactId>scala-xml_2.11</artifactId>
+      <version>1.0.4</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-streaming_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-streaming_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <type>test-jar</type>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <profiles>
+    <!-- Skip the tests in this module -->
+    <profile>
+      <id>skipIntegrationTests</id>
+      <activation>
+        <property>
+          <name>skipIntegrationTests</name>
+        </property>
+      </activation>
+      <properties>
+        <skipTests>true</skipTests>
+      </properties>
+    </profile>
+  </profiles>
+
+  <reporting>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-report-plugin</artifactId>
+        <version>2.7.2</version>
+        <reportSets>
+          <reportSet>
+            <id>spark-integration-tests</id>
+            <reports>
+              <report>report-only</report>
+            </reports>
+            <configuration>
+              <outputName>failsafe-report</outputName>
+              <reportsDirectories>
+                <reportsDirectory>${project.build.directory}/failsafe-reports</reportsDirectory>
+              </reportsDirectories>
+            </configuration>
+          </reportSet>
+        </reportSets>
+      </plugin>
+    </plugins>
+  </reporting>
+
+</project>
diff --git a/spark/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java b/spark/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java
new file mode 100644 (file)
index 0000000..e5a8ddd
--- /dev/null
@@ -0,0 +1,677 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.IntegrationTestBase;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapreduce.IntegrationTestBulkLoad;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.spark.Partitioner;
+import org.apache.spark.SerializableWritable;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.spark.api.java.function.Function2;
+import org.apache.spark.api.java.function.PairFlatMapFunction;
+import org.apache.spark.api.java.function.VoidFunction;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.Tuple2;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
+
+/**
+ * Test Bulk Load and Spark on a distributed cluster.
+ * It starts a Spark job that creates linked chains.
+ * This test mimics {@link IntegrationTestBulkLoad} in mapreduce.
+ *
+ * Usage on cluster:
+ *   First add the HBase-related jars and hbase-spark.jar to the Spark classpath.
+ *
+ *   spark-submit --class org.apache.hadoop.hbase.spark.IntegrationTestSparkBulkLoad
+ *                HBASE_HOME/lib/hbase-spark-it-XXX-tests.jar -m slowDeterministic
+ *                -Dhbase.spark.bulkload.chainlength=300
+ */
+public class IntegrationTestSparkBulkLoad extends IntegrationTestBase {
+
+  private static final Logger LOG = LoggerFactory.getLogger(IntegrationTestSparkBulkLoad.class);
+
+  // The number of partitions for random generated data
+  private static String BULKLOAD_PARTITIONS_NUM = "hbase.spark.bulkload.partitionsnum";
+  private static int DEFAULT_BULKLOAD_PARTITIONS_NUM = 3;
+
+  private static String BULKLOAD_CHAIN_LENGTH = "hbase.spark.bulkload.chainlength";
+  private static int DEFAULT_BULKLOAD_CHAIN_LENGTH = 200000;
+
+  private static String BULKLOAD_IMPORT_ROUNDS = "hbase.spark.bulkload.importround";
+  private static int DEFAULT_BULKLOAD_IMPORT_ROUNDS  = 1;
+
+  private static String CURRENT_ROUND_NUM = "hbase.spark.bulkload.current.roundnum";
+
+  private static String NUM_REPLICA_COUNT_KEY = "hbase.spark.bulkload.replica.countkey";
+  private static int DEFAULT_NUM_REPLICA_COUNT = 1;
+
+  private static String BULKLOAD_TABLE_NAME = "hbase.spark.bulkload.tableName";
+  private static String DEFAULT_BULKLOAD_TABLE_NAME = "IntegrationTestSparkBulkLoad";
+
+  private static String BULKLOAD_OUTPUT_PATH = "hbase.spark.bulkload.output.path";
+
+  private static final String OPT_LOAD = "load";
+  private static final String OPT_CHECK = "check";
+
+  private boolean load = false;
+  private boolean check = false;
+
+  private static final byte[] CHAIN_FAM  = Bytes.toBytes("L");
+  private static final byte[] SORT_FAM = Bytes.toBytes("S");
+  private static final byte[] DATA_FAM = Bytes.toBytes("D");
+
+  /**
+   * Runs a Spark job to load data into the HBase table.
+   */
+  public void runLoad() throws Exception {
+    setupTable();
+    int numImportRounds = getConf().getInt(BULKLOAD_IMPORT_ROUNDS, DEFAULT_BULKLOAD_IMPORT_ROUNDS);
+    LOG.info("Running load with numIterations:" + numImportRounds);
+    for (int i = 0; i < numImportRounds; i++) {
+      runLinkedListSparkJob(i);
+    }
+  }
+
+  /**
+   * Runs a Spark job that creates a linked list for testing.
+   * @param iteration the iteration number of this job
+   * @throws Exception if an HBase operation or getting the test directory fails
+   */
+  public void runLinkedListSparkJob(int iteration) throws Exception {
+    String jobName =  IntegrationTestSparkBulkLoad.class.getSimpleName() + " _load " +
+        EnvironmentEdgeManager.currentTime();
+
+    LOG.info("Running iteration " + iteration + "in Spark Job");
+
+    Path output = null;
+    if (conf.get(BULKLOAD_OUTPUT_PATH) == null) {
+      output = util.getDataTestDirOnTestFS(getTablename() + "-" + iteration);
+    } else {
+      output = new Path(conf.get(BULKLOAD_OUTPUT_PATH));
+    }
+
+    SparkConf sparkConf = new SparkConf().setAppName(jobName).setMaster("local");
+    Configuration hbaseConf = new Configuration(getConf());
+    hbaseConf.setInt(CURRENT_ROUND_NUM, iteration);
+    int partitionNum = hbaseConf.getInt(BULKLOAD_PARTITIONS_NUM, DEFAULT_BULKLOAD_PARTITIONS_NUM);
+
+
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, hbaseConf);
+
+
+    LOG.info("Partition RDD into " + partitionNum + " parts");
+    List<String> temp = new ArrayList<>();
+    JavaRDD<List<byte[]>> rdd = jsc.parallelize(temp, partitionNum).
+        mapPartitionsWithIndex(new LinkedListCreationMapper(new SerializableWritable<>(hbaseConf)),
+                false);
+
+    hbaseContext.bulkLoad(rdd, getTablename(), new ListToKeyValueFunc(), output.toUri().getPath(),
+        new HashMap<>(), false, HConstants.DEFAULT_MAX_FILE_SIZE);
+
+    try (Connection conn = ConnectionFactory.createConnection(conf);
+        Admin admin = conn.getAdmin();
+        Table table = conn.getTable(getTablename());
+        RegionLocator regionLocator = conn.getRegionLocator(getTablename())) {
+      // Create a new loader.
+      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
+
+      // Load the HFiles into table.
+      loader.doBulkLoad(output, admin, table, regionLocator);
+    }
+
+
+    // Delete the files.
+    util.getTestFileSystem().delete(output, true);
+    jsc.close();
+  }
+
+  // See mapreduce.IntegrationTestBulkLoad#LinkedListCreationMapper
+  // Used to generate test data
+  public static class LinkedListCreationMapper implements
+      Function2<Integer, Iterator<String>, Iterator<List<byte[]>>> {
+
+    SerializableWritable swConfig = null;
+    private Random rand = new Random();
+
+    public LinkedListCreationMapper(SerializableWritable conf) {
+      this.swConfig = conf;
+    }
+
+    @Override
+    public Iterator<List<byte[]>> call(Integer v1, Iterator v2) throws Exception {
+      Configuration config = (Configuration) swConfig.value();
+      int partitionId = v1.intValue();
+      LOG.info("Starting create List in Partition " + partitionId);
+
+      int partitionNum = config.getInt(BULKLOAD_PARTITIONS_NUM, DEFAULT_BULKLOAD_PARTITIONS_NUM);
+      int chainLength = config.getInt(BULKLOAD_CHAIN_LENGTH, DEFAULT_BULKLOAD_CHAIN_LENGTH);
+      int iterationsNum = config.getInt(BULKLOAD_IMPORT_ROUNDS, DEFAULT_BULKLOAD_IMPORT_ROUNDS);
+      int iterationsCur = config.getInt(CURRENT_ROUND_NUM, 0);
+      List<List<byte[]>> res = new LinkedList<>();
+
+
+      long tempId = partitionId + iterationsCur * partitionNum;
+      long totalPartitionNum = partitionNum * iterationsNum;
+      long chainId = Math.abs(rand.nextLong());
+      chainId = chainId - (chainId % totalPartitionNum) + tempId;
+
+      byte[] chainIdArray = Bytes.toBytes(chainId);
+      long currentRow = 0;
+      long nextRow = getNextRow(0, chainLength);
+      for(long i = 0; i < chainLength; i++) {
+        byte[] rk = Bytes.toBytes(currentRow);
+        // Insert record into a list
+        List<byte[]> tmp1 = Arrays.asList(rk, CHAIN_FAM, chainIdArray, Bytes.toBytes(nextRow));
+        List<byte[]> tmp2 = Arrays.asList(rk, SORT_FAM, chainIdArray, Bytes.toBytes(i));
+        List<byte[]> tmp3 = Arrays.asList(rk, DATA_FAM, chainIdArray, Bytes.toBytes(
+            RandomStringUtils.randomAlphabetic(50)));
+        res.add(tmp1);
+        res.add(tmp2);
+        res.add(tmp3);
+
+        currentRow = nextRow;
+        nextRow = getNextRow(i+1, chainLength);
+      }
+      return res.iterator();
+    }
+
+    /** Returns a unique row id within this chain for this index */
+    private long getNextRow(long index, long chainLength) {
+      long nextRow = Math.abs(new Random().nextLong());
+      // use significant bits from the random number, but pad with index to ensure it is unique
+      // this also ensures that we do not reuse row = 0
+      // row collisions from multiple mappers are fine, since we guarantee unique chainIds
+      nextRow = nextRow - (nextRow % chainLength) + index;
+      return nextRow;
+    }
+  }
+
+
+
+  public static class ListToKeyValueFunc implements
+      Function<List<byte[]>, Pair<KeyFamilyQualifier, byte[]>> {
+    @Override
+    public Pair<KeyFamilyQualifier, byte[]> call(List<byte[]> v1) throws Exception {
+      if (v1 == null || v1.size() != 4) {
+        return null;
+      }
+      KeyFamilyQualifier kfq = new KeyFamilyQualifier(v1.get(0), v1.get(1), v1.get(2));
+
+      return new Pair<>(kfq, v1.get(3));
+    }
+  }
+
+  /**
+   * After adding data to the table, start a Spark job to check the bulk load.
+   */
+  public void runCheck() throws Exception {
+    LOG.info("Running check");
+    String jobName = IntegrationTestSparkBulkLoad.class.getSimpleName() + "_check" +
+            EnvironmentEdgeManager.currentTime();
+
+    SparkConf sparkConf = new SparkConf().setAppName(jobName).setMaster("local");
+    Configuration hbaseConf = new Configuration(getConf());
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, hbaseConf);
+
+    Scan scan = new Scan();
+    scan.addFamily(CHAIN_FAM);
+    scan.addFamily(SORT_FAM);
+    scan.setMaxVersions(1);
+    scan.setCacheBlocks(false);
+    scan.setBatch(1000);
+    int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, DEFAULT_NUM_REPLICA_COUNT);
+    if (replicaCount != DEFAULT_NUM_REPLICA_COUNT) {
+      scan.setConsistency(Consistency.TIMELINE);
+    }
+
+    // 1. Using TableInputFormat to get data from HBase table
+    // 2. Mimic LinkedListCheckingMapper in mapreduce.IntegrationTestBulkLoad
+    // 3. Sort LinkKey by its order ID
+    // 4. Group LinkKey if they have same chainId, and repartition RDD by NaturalKeyPartitioner
+    // 5. Check LinkList in each Partition using LinkedListCheckingFlatMapFunc
+    hbaseContext.hbaseRDD(getTablename(), scan).flatMapToPair(new LinkedListCheckingFlatMapFunc())
+        .sortByKey()
+        .combineByKey(new createCombinerFunc(), new mergeValueFunc(), new mergeCombinersFunc(),
+            new NaturalKeyPartitioner(new SerializableWritable<>(hbaseConf)))
+        .foreach(new LinkedListCheckingForeachFunc(new SerializableWritable<>(hbaseConf)));
+    jsc.close();
+  }
+
+  private void runCheckWithRetry() throws Exception {
+    try {
+      runCheck();
+    } catch (Throwable t) {
+      LOG.warn("Received " + StringUtils.stringifyException(t));
+      LOG.warn("Running the check MR Job again to see whether an ephemeral problem or not");
+      runCheck();
+      throw t; // we should still fail the test even if second retry succeeds
+    }
+    // everything green
+  }
+
+  /**
+   * PairFlatMapFunction used to transfer {@code <Row, Result>} to
+   * {@code Tuple<SparkLinkKey, SparkLinkChain>}.
+   */
+  public static class LinkedListCheckingFlatMapFunc implements
+      PairFlatMapFunction<Tuple2<ImmutableBytesWritable, Result>, SparkLinkKey, SparkLinkChain> {
+
+    @Override
+    public Iterator<Tuple2<SparkLinkKey, SparkLinkChain>> call(Tuple2<ImmutableBytesWritable,
+            Result> v) throws Exception {
+      Result value = v._2();
+      long longRk = Bytes.toLong(value.getRow());
+      List<Tuple2<SparkLinkKey, SparkLinkChain>> list = new LinkedList<>();
+
+      for (Map.Entry<byte[], byte[]> entry : value.getFamilyMap(CHAIN_FAM).entrySet()) {
+        long chainId = Bytes.toLong(entry.getKey());
+        long next = Bytes.toLong(entry.getValue());
+        Cell c = value.getColumnCells(SORT_FAM, entry.getKey()).get(0);
+        long order = Bytes.toLong(CellUtil.cloneValue(c));
+        Tuple2<SparkLinkKey, SparkLinkChain> tuple2 =
+            new Tuple2<>(new SparkLinkKey(chainId, order), new SparkLinkChain(longRk, next));
+        list.add(tuple2);
+      }
+      return list.iterator();
+    }
+  }
+
+  public static class createCombinerFunc implements
+      Function<SparkLinkChain, List<SparkLinkChain>> {
+    @Override
+    public List<SparkLinkChain> call(SparkLinkChain v1) throws Exception {
+      List<SparkLinkChain> list = new LinkedList<>();
+      list.add(v1);
+      return list;
+    }
+  }
+
+  public static class mergeValueFunc implements
+      Function2<List<SparkLinkChain>, SparkLinkChain, List<SparkLinkChain>> {
+    @Override
+    public List<SparkLinkChain> call(List<SparkLinkChain> v1, SparkLinkChain v2) throws Exception {
+      if (v1 == null) {
+        v1 = new LinkedList<>();
+      }
+
+      v1.add(v2);
+      return v1;
+    }
+  }
+
+  public static class mergeCombinersFunc implements
+      Function2<List<SparkLinkChain>, List<SparkLinkChain>, List<SparkLinkChain>> {
+    @Override
+    public List<SparkLinkChain> call(List<SparkLinkChain> v1, List<SparkLinkChain> v2)
+            throws Exception {
+      v1.addAll(v2);
+      return v1;
+    }
+  }
+
+  /**
+   * Class to figure out what partition to send a link in the chain to.  This is based upon
+   * the linkKey's ChainId.
+   */
+  public static class NaturalKeyPartitioner extends Partitioner {
+
+    private int numPartions = 0;
+    public NaturalKeyPartitioner(SerializableWritable swConf) {
+      Configuration hbaseConf = (Configuration) swConf.value();
+      numPartions = hbaseConf.getInt(BULKLOAD_PARTITIONS_NUM, DEFAULT_BULKLOAD_PARTITIONS_NUM);
+
+    }
+
+    @Override
+    public int numPartitions() {
+      return numPartions;
+    }
+
+    @Override
+    public int getPartition(Object key) {
+      if (!(key instanceof SparkLinkKey)) {
+        return -1;
+      }
+
+      int hash = ((SparkLinkKey) key).getChainId().hashCode();
+      return Math.abs(hash % numPartions);
+
+    }
+  }
+
+  /**
+   * Sort all LinkChain for one LinkKey, and test {@code List<LinkChain>}.
+   */
+  public static class LinkedListCheckingForeachFunc
+      implements VoidFunction<Tuple2<SparkLinkKey, List<SparkLinkChain>>> {
+
+    private  SerializableWritable swConf = null;
+
+    public LinkedListCheckingForeachFunc(SerializableWritable conf) {
+      swConf = conf;
+    }
+
+    @Override
+    public void call(Tuple2<SparkLinkKey, List<SparkLinkChain>> v1) throws Exception {
+      long next = -1L;
+      long prev = -1L;
+      long count = 0L;
+
+      SparkLinkKey key = v1._1();
+      List<SparkLinkChain> values = v1._2();
+
+      for (SparkLinkChain lc : values) {
+
+        if (next == -1) {
+          if (lc.getRk() != 0L) {
+            String msg = "Chains should all start at rk 0, but read rk " + lc.getRk()
+                + ". Chain:" + key.getChainId() + ", order:" + key.getOrder();
+            throw new RuntimeException(msg);
+          }
+          next = lc.getNext();
+        } else {
+          if (next != lc.getRk()) {
+            String msg = "Missing a link in the chain. Prev rk " + prev + " was, expecting "
+                + next + " but got " + lc.getRk() + ". Chain:" + key.getChainId()
+                + ", order:" + key.getOrder();
+            throw new RuntimeException(msg);
+          }
+          prev = lc.getRk();
+          next = lc.getNext();
+        }
+        count++;
+      }
+      Configuration hbaseConf = (Configuration) swConf.value();
+      int expectedChainLen = hbaseConf.getInt(BULKLOAD_CHAIN_LENGTH, DEFAULT_BULKLOAD_CHAIN_LENGTH);
+      if (count != expectedChainLen) {
+        String msg = "Chain wasn't the correct length.  Expected " + expectedChainLen + " got "
+            + count + ". Chain:" + key.getChainId() + ", order:" + key.getOrder();
+        throw new RuntimeException(msg);
+      }
+    }
+  }
+
+  /**
+   * Serializable class used as the key to group links in the linked list.
+   *
+   * Used as the key emitted from a pass over the table.
+   */
+  public static class SparkLinkKey implements java.io.Serializable, Comparable<SparkLinkKey> {
+
+    private Long chainId;
+    private Long order;
+
+    public Long getOrder() {
+      return order;
+    }
+
+    public Long getChainId() {
+      return chainId;
+    }
+
+    public SparkLinkKey(long chainId, long order) {
+      this.chainId = chainId;
+      this.order = order;
+    }
+
+    @Override
+    public int hashCode() {
+      return this.getChainId().hashCode();
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      if (!(other instanceof SparkLinkKey)) {
+        return false;
+      }
+
+      SparkLinkKey otherKey = (SparkLinkKey) other;
+      return this.getChainId().equals(otherKey.getChainId());
+    }
+
+    @Override
+    public int compareTo(SparkLinkKey other) {
+      int res = getChainId().compareTo(other.getChainId());
+
+      if (res == 0) {
+        res = getOrder().compareTo(other.getOrder());
+      }
+
+      return res;
+    }
+  }
+
+  /**
+   * Serializable value emitted from a pass over the HBase table.
+   */
+  public static class SparkLinkChain implements java.io.Serializable, Comparable<SparkLinkChain>{
+
+    public Long getNext() {
+      return next;
+    }
+
+    public Long getRk() {
+      return rk;
+    }
+
+
+    public SparkLinkChain(Long rk, Long next) {
+      this.rk = rk;
+      this.next = next;
+    }
+
+    private Long rk;
+    private Long next;
+
+    @Override
+    public int compareTo(SparkLinkChain linkChain) {
+      int res = getRk().compareTo(linkChain.getRk());
+      if (res == 0) {
+        res = getNext().compareTo(linkChain.getNext());
+      }
+      return res;
+    }
+
+    @Override
+    public int hashCode() {
+      return getRk().hashCode() ^ getNext().hashCode();
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      if (!(other instanceof SparkLinkChain)) {
+        return false;
+      }
+
+      SparkLinkChain otherKey = (SparkLinkChain) other;
+      return this.getRk().equals(otherKey.getRk()) && this.getNext().equals(otherKey.getNext());
+    }
+  }
+
+
+  /**
+   * Allow the scan to go to a replica; this does not affect runCheck(),
+   * since the data are bulk loaded from HFiles into the table.
+   * @throws IOException if an HBase operation fails
+   * @throws InterruptedException if modifying the table fails
+   */
+  private void installSlowingCoproc() throws IOException, InterruptedException {
+    int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, DEFAULT_NUM_REPLICA_COUNT);
+
+    if (replicaCount == DEFAULT_NUM_REPLICA_COUNT) {
+      return;
+    }
+
+    TableName t = getTablename();
+    Admin admin = util.getAdmin();
+    HTableDescriptor desc = admin.getTableDescriptor(t);
+    desc.addCoprocessor(IntegrationTestBulkLoad.SlowMeCoproScanOperations.class.getName());
+    HBaseTestingUtility.modifyTableSync(admin, desc);
+  }
+
+  @Test
+  public void testBulkLoad() throws Exception {
+    runLoad();
+    installSlowingCoproc();
+    runCheckWithRetry();
+  }
+
+
+  private byte[][] getSplits(int numRegions) {
+    RegionSplitter.UniformSplit split = new RegionSplitter.UniformSplit();
+    split.setFirstRow(Bytes.toBytes(0L));
+    split.setLastRow(Bytes.toBytes(Long.MAX_VALUE));
+    return split.split(numRegions);
+  }
+
+  private void setupTable() throws IOException, InterruptedException {
+    if (util.getAdmin().tableExists(getTablename())) {
+      util.deleteTable(getTablename());
+    }
+
+    util.createTable(
+        getTablename(),
+        new byte[][]{CHAIN_FAM, SORT_FAM, DATA_FAM},
+        getSplits(16)
+    );
+
+    int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, DEFAULT_NUM_REPLICA_COUNT);
+
+    if (replicaCount == DEFAULT_NUM_REPLICA_COUNT) {
+      return;
+    }
+
+    TableName t = getTablename();
+    HBaseTestingUtility.setReplicas(util.getAdmin(), t, replicaCount);
+  }
+
+  @Override
+  public void setUpCluster() throws Exception {
+    util = getTestingUtil(getConf());
+    util.initializeCluster(1);
+    int replicaCount = getConf().getInt(NUM_REPLICA_COUNT_KEY, DEFAULT_NUM_REPLICA_COUNT);
+    if (LOG.isDebugEnabled() && replicaCount != DEFAULT_NUM_REPLICA_COUNT) {
+      LOG.debug("Region Replicas enabled: " + replicaCount);
+    }
+
+    // Scale this up on a real cluster
+    if (util.isDistributedCluster()) {
+      util.getConfiguration().setIfUnset(BULKLOAD_PARTITIONS_NUM,
+              String.valueOf(DEFAULT_BULKLOAD_PARTITIONS_NUM));
+      util.getConfiguration().setIfUnset(BULKLOAD_IMPORT_ROUNDS, "1");
+    } else {
+      util.startMiniMapReduceCluster();
+    }
+  }
+
+  @Override
+  protected void addOptions() {
+    super.addOptions();
+    super.addOptNoArg(OPT_CHECK, "Run check only");
+    super.addOptNoArg(OPT_LOAD, "Run load only");
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    super.processOptions(cmd);
+    check = cmd.hasOption(OPT_CHECK);
+    load = cmd.hasOption(OPT_LOAD);
+  }
+
+  @Override
+  public int runTestFromCommandLine() throws Exception {
+    if (load) {
+      runLoad();
+    } else if (check) {
+      installSlowingCoproc();
+      runCheckWithRetry();
+    } else {
+      testBulkLoad();
+    }
+    return 0;
+  }
+
+  @Override
+  public TableName getTablename() {
+    return getTableName(getConf());
+  }
+
+  public static TableName getTableName(Configuration conf) {
+    return TableName.valueOf(conf.get(BULKLOAD_TABLE_NAME, DEFAULT_BULKLOAD_TABLE_NAME));
+  }
+
+  @Override
+  protected Set<String> getColumnFamilies() {
+    return Sets.newHashSet(Bytes.toString(CHAIN_FAM) , Bytes.toString(DATA_FAM),
+        Bytes.toString(SORT_FAM));
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    IntegrationTestingUtility.setUseDistributedCluster(conf);
+    int status =  ToolRunner.run(conf, new IntegrationTestSparkBulkLoad(), args);
+    System.exit(status);
+  }
+}
diff --git a/spark/hbase-spark-it/src/test/resources/hbase-site.xml b/spark/hbase-spark-it/src/test/resources/hbase-site.xml
new file mode 100644 (file)
index 0000000..99d2ab8
--- /dev/null
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
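+  <!-- Skip the hbase-default.xml version check; the test classpath need not
+       match a specific HBase version. -->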
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+  </property>
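+  <!-- Keep connection pool threads alive only briefly (value is in seconds) so
+       mini-cluster tests shut down quickly. -->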
+  <property>
+    <name>hbase.hconnection.threads.keepalivetime</name>
+    <value>3</value>
+  </property>
+</configuration>
diff --git a/spark/hbase-spark/README.txt b/spark/hbase-spark/README.txt
new file mode 100644 (file)
index 0000000..7fad811
--- /dev/null
@@ -0,0 +1,6 @@
+ON PROTOBUFS
+This maven module has the core protobuf definition files ('.proto') used by the
+hbase-spark connector, including its tests.
+
+Generation of java files from the .proto files included here is done as
+part of the build.
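
As a rough sketch of what the generated code looks like from the consumer side (the class and builder method names below are the ones used by SparkSQLPushDownFilter later in this patch; the expression string and encoder class name are placeholder values, not values taken from the build):

    import org.apache.hadoop.hbase.spark.protobuf.generated.SparkFilterProtos;
    import org.apache.hadoop.hbase.util.ByteStringer;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SparkFilterProtoSketch {
      public static void main(String[] args) {
        // Build the push-down filter message the same way SparkSQLPushDownFilter#toByteArray does.
        SparkFilterProtos.SQLPredicatePushDownFilter.Builder builder =
            SparkFilterProtos.SQLPredicatePushDownFilter.newBuilder();
        builder.setDynamicLogicExpression("Col0 > 0");                   // placeholder expression
        builder.setEncoderClassName("com.example.PlaceholderEncoder");   // placeholder class name
        builder.addValueFromQueryArray(ByteStringer.wrap(Bytes.toBytes(0)));
        byte[] wire = builder.build().toByteArray();                     // protobuf wire format
        System.out.println("Serialized filter message: " + wire.length + " bytes");
      }
    }
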
diff --git a/spark/hbase-spark/pom.xml b/spark/hbase-spark/pom.xml
new file mode 100644 (file)
index 0000000..d0f4207
--- /dev/null
@@ -0,0 +1,750 @@
+<?xml version="1.0"?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hbase.connectors</groupId>
+    <artifactId>spark</artifactId>
+    <version>${revision}</version>
+    <relativePath>../</relativePath>
+  </parent>
+  <groupId>org.apache.hbase.connectors.spark</groupId>
+  <artifactId>hbase-spark</artifactId>
+  <name>Apache HBase - Spark</name>
+  <properties>
+    <spark.version>2.1.1</spark.version>
+    <!-- The following Scala version is in sync with Spark's choice.
+         Please take caution when this version is modified. -->
+    <scala.version>2.11.8</scala.version>
+    <scala.binary.version>2.11</scala.binary.version>
+    <top.dir>${project.basedir}/..</top.dir>
+  </properties>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hbase.thirdparty</groupId>
+      <artifactId>hbase-shaded-miscellaneous</artifactId>
+    </dependency>
+    <!-- Force import of Spark's servlet API for unit tests -->
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <!-- Mark Spark / Scala as provided -->
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-library</artifactId>
+      <version>${scala.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-core_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <!-- make sure wrong scala version is not pulled in -->
+          <groupId>org.scala-lang</groupId>
+          <artifactId>scala-library</artifactId>
+        </exclusion>
+        <exclusion>
+          <!-- make sure wrong scala version is not pulled in -->
+          <groupId>org.scala-lang</groupId>
+          <artifactId>scalap</artifactId>
+        </exclusion>
+        <exclusion>
+           <groupId>com.google.code.findbugs</groupId>
+           <artifactId>jsr305</artifactId>
+        </exclusion>
+        <exclusion>
+          <!-- exclude the snappy-java pulled in by Spark; a pinned version is declared below -->
+          <groupId>org.xerial.snappy</groupId>
+          <artifactId>snappy-java</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xerces</groupId>
+          <artifactId>xercesImpl</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.xerial.snappy</groupId>
+      <artifactId>snappy-java</artifactId>
+      <version>1.1.4</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-streaming_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-streaming_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+      <type>test-jar</type>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scala.binary.version}</artifactId>
+      <version>2.2.4</version>
+      <scope>test</scope>
+      <exclusions>
+       <exclusion>
+         <groupId>org.scala-lang</groupId>
+         <artifactId>scala-library</artifactId>
+       </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.scalamock</groupId>
+      <artifactId>scalamock-scalatest-support_${scala.binary.version}</artifactId>
+      <version>3.1.4</version>
+      <scope>test</scope>
+      <exclusions>
+       <exclusion>
+         <groupId>org.scala-lang</groupId>
+         <artifactId>scala-library</artifactId>
+       </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.module</groupId>
+      <artifactId>jackson-module-scala_${scala.binary.version}</artifactId>
+      <version>${jackson.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.scala-lang</groupId>
+          <artifactId>scala-library</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.scala-lang</groupId>
+          <artifactId>scala-reflect</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.thrift</groupId>
+          <artifactId>thrift</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol-shaded</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.thrift</groupId>
+          <artifactId>thrift</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.thrift</groupId>
+          <artifactId>thrift</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-zookeeper</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.avro</groupId>
+      <artifactId>avro</artifactId>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+      </plugin>
+      <!-- clover fails due to scala/java cross compile.  This guarantees that the scala is
+             compiled before the java that will be evaluated by code coverage (scala will not be).
+            https://confluence.atlassian.com/display/CLOVERKB/Java-+Scala+cross-compilation+error+-+cannot+find+symbol
+            -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>src/main/scala</source>
+              </sources>
+            </configuration>
+          </execution>
+          <execution>
+            <id>add-test-source</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>add-test-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>src/test/scala</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>compile-protoc</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <executions>
+          <!-- scala is ok in the spark modules -->
+          <execution>
+            <id>banned-scala</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <configuration>
+          <failOnViolation>true</failOnViolation>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>net.revelc.code</groupId>
+        <artifactId>warbucks-maven-plugin</artifactId>
+        <!-- TODO: remove the following config if https://issues.scala-lang.org/browse/SI-3600 is resolved -->
+        <!-- override the root config to add more filter -->
+        <configuration>
+          <ignoreRuleFailures>true</ignoreRuleFailures>
+          <rules>
+            <rule>
+              <!-- exclude the generated java files and package object-->
+              <classPattern>(?!.*(.generated.|.tmpl.|\$|org.apache.hadoop.hbase.spark.hbase.package)).*</classPattern>
+              <includeTestClasses>false</includeTestClasses>
+              <includePublicClasses>true</includePublicClasses>
+              <includePackagePrivateClasses>false</includePackagePrivateClasses>
+              <includeProtectedClasses>false</includeProtectedClasses>
+              <includePrivateClasses>false</includePrivateClasses>
+              <classAnnotationPattern>org[.]apache[.]yetus[.]audience[.]InterfaceAudience.*</classAnnotationPattern>
+            </rule>
+          </rules>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+  <profiles>
+    <!-- Skip the tests in this module -->
+    <profile>
+      <id>skipSparkTests</id>
+      <activation>
+        <property>
+          <name>skipSparkTests</name>
+        </property>
+      </activation>
+      <properties>
+        <surefire.skipFirstPart>true</surefire.skipFirstPart>
+        <surefire.skipSecondPart>true</surefire.skipSecondPart>
+        <skipTests>true</skipTests>
+      </properties>
+    </profile>
+    <!-- profile against Hadoop 2.x: This is the default. -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+          <!--h2-->
+          <name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+          <version>${hadoop-two.version}</version>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <version>${hadoop-two.version}</version>
+          <exclusions>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <version>${hadoop-two.version}</version>
+          <type>test-jar</type>
+          <scope>test</scope>
+          <exclusions>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+          <version>${hadoop-two.version}</version>
+          <type>test-jar</type>
+          <scope>test</scope>
+          <exclusions>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>xerces</groupId>
+              <artifactId>xercesImpl</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minikdc</artifactId>
+          <version>${hadoop-two.version}</version>
+          <scope>test</scope>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!--
+      profile for building against Hadoop 3.0.x. Activate using:
+       mvn -Dhadoop.profile=3.0
+    -->
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>3.0</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+          <version>${hadoop-three.version}</version>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <version>${hadoop-three.version}</version>
+          <exclusions>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <version>${hadoop-three.version}</version>
+          <type>test-jar</type>
+          <scope>test</scope>
+          <exclusions>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+          <version>${hadoop-three.version}</version>
+          <type>test-jar</type>
+          <scope>test</scope>
+          <exclusions>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minikdc</artifactId>
+          <version>${hadoop-three.version}</version>
+          <scope>test</scope>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!-- Attempt to skip scala-maven-plugin work, see
+      https://github.com/davidB/scala-maven-plugin/issues/198
+      -->
+    <!-- 'scala.skip' is used by the website generation script on jenkins to
+         mitigate the impact of unneeded build forks while building our javadocs.
+      -->
+    <profile>
+      <id>build-scala-sources</id>
+      <activation>
+        <property>
+          <name>scala.skip</name>
+          <value>!true</value>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>net.alchim31.maven</groupId>
+            <artifactId>scala-maven-plugin</artifactId>
+            <version>3.2.0</version>
+            <configuration>
+              <charset>${project.build.sourceEncoding}</charset>
+              <scalaVersion>${scala.version}</scalaVersion>
+              <args>
+                <arg>-feature</arg>
+              </args>
+            </configuration>
+            <executions>
+              <execution>
+                <id>scala-compile-first</id>
+                <phase>process-resources</phase>
+                <goals>
+                  <goal>add-source</goal>
+                  <goal>compile</goal>
+                </goals>
+              </execution>
+              <execution>
+                <id>scala-test-compile</id>
+                <phase>process-test-resources</phase>
+                <goals>
+                  <goal>testCompile</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.scalatest</groupId>
+            <artifactId>scalatest-maven-plugin</artifactId>
+            <version>1.0</version>
+            <configuration>
+              <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
+              <junitxml>.</junitxml>
+              <filereports>WDF TestSuite.txt</filereports>
+              <parallel>false</parallel>
+            </configuration>
+            <executions>
+              <execution>
+                <id>test</id>
+                <phase>test</phase>
+                <goals>
+                  <goal>test</goal>
+                </goals>
+                <configuration>
+                  <argLine>-Xmx1536m -XX:ReservedCodeCacheSize=512m</argLine>
+                  <parallel>false</parallel>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
new file mode 100644 (file)
index 0000000..a17d2e6
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.spark.datasources.BytesEncoder;
+import org.apache.hadoop.hbase.spark.datasources.Field;
+import org.apache.hadoop.hbase.spark.datasources.JavaBytesEncoder;
+import org.apache.hadoop.hbase.spark.protobuf.generated.SparkFilterProtos;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import scala.collection.mutable.MutableList;
+
+/**
+ * This filter pushes down all qualifier logic given to us by Spark SQL
+ * so that the filtering happens at the region server level
+ * and the data is not sent back to the client just to be filtered.
+ */
+@InterfaceAudience.Private
+public class SparkSQLPushDownFilter extends FilterBase {
+  protected static final Logger log = LoggerFactory.getLogger(SparkSQLPushDownFilter.class);
+
+  //The following values are populated from the serialized protobuf
+  DynamicLogicExpression dynamicLogicExpression;
+  byte[][] valueFromQueryArray;
+  HashMap<ByteArrayComparable, HashMap<ByteArrayComparable, String>>
+          currentCellToColumnIndexMap;
+
+  //The following values are transient
+  HashMap<String, ByteArrayComparable> columnToCurrentRowValueMap = null;
+
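+  //The row key is exposed to Spark SQL as a pseudo column: empty column family
+  //with the qualifier "key"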
+  static final byte[] rowKeyFamily = new byte[0];
+  static final byte[] rowKeyQualifier = Bytes.toBytes("key");
+
+  String encoderClassName;
+
+  public SparkSQLPushDownFilter(DynamicLogicExpression dynamicLogicExpression,
+                                byte[][] valueFromQueryArray,
+                                HashMap<ByteArrayComparable,
+                                        HashMap<ByteArrayComparable, String>>
+                                        currentCellToColumnIndexMap, String encoderClassName) {
+    this.dynamicLogicExpression = dynamicLogicExpression;
+    this.valueFromQueryArray = valueFromQueryArray;
+    this.currentCellToColumnIndexMap = currentCellToColumnIndexMap;
+    this.encoderClassName = encoderClassName;
+  }
+
+  public SparkSQLPushDownFilter(DynamicLogicExpression dynamicLogicExpression,
+                                byte[][] valueFromQueryArray,
+                                MutableList<Field> fields, String encoderClassName) {
+    this.dynamicLogicExpression = dynamicLogicExpression;
+    this.valueFromQueryArray = valueFromQueryArray;
+    this.encoderClassName = encoderClassName;
+
+    //generate family qualifier to index mapping
+    this.currentCellToColumnIndexMap =
+            new HashMap<>();
+
+    for (int i = 0; i < fields.size(); i++) {
+      Field field = fields.apply(i);
+
+      byte[] cfBytes = field.cfBytes();
+      ByteArrayComparable familyByteComparable =
+          new ByteArrayComparable(cfBytes, 0, cfBytes.length);
+
+      HashMap<ByteArrayComparable, String> qualifierIndexMap =
+              currentCellToColumnIndexMap.get(familyByteComparable);
+
+      if (qualifierIndexMap == null) {
+        qualifierIndexMap = new HashMap<>();
+        currentCellToColumnIndexMap.put(familyByteComparable, qualifierIndexMap);
+      }
+      byte[] qBytes = field.colBytes();
+      ByteArrayComparable qualifierByteComparable =
+          new ByteArrayComparable(qBytes, 0, qBytes.length);
+
+      qualifierIndexMap.put(qualifierByteComparable, field.colName());
+    }
+  }
+
+  @Override
+  public ReturnCode filterCell(final Cell c) throws IOException {
+
+    //If columnToCurrentRowValueMap has not been built for this row yet,
+    // populate it with the row key first
+    if (columnToCurrentRowValueMap == null) {
+      columnToCurrentRowValueMap = new HashMap<>();
+      HashMap<ByteArrayComparable, String> qualifierColumnMap =
+              currentCellToColumnIndexMap.get(
+                      new ByteArrayComparable(rowKeyFamily, 0, rowKeyFamily.length));
+
+      if (qualifierColumnMap != null) {
+        String rowKeyColumnName =
+                qualifierColumnMap.get(
+                        new ByteArrayComparable(rowKeyQualifier, 0,
+                                rowKeyQualifier.length));
+        //Make sure that the rowKey is part of the where clause
+        if (rowKeyColumnName != null) {
+          columnToCurrentRowValueMap.put(rowKeyColumnName,
+                  new ByteArrayComparable(c.getRowArray(),
+                          c.getRowOffset(), c.getRowLength()));
+        }
+      }
+    }
+
+    //Always populate the column value into the RowValueMap
+    ByteArrayComparable currentFamilyByteComparable =
+            new ByteArrayComparable(c.getFamilyArray(),
+            c.getFamilyOffset(),
+            c.getFamilyLength());
+
+    HashMap<ByteArrayComparable, String> qualifierColumnMap =
+            currentCellToColumnIndexMap.get(
+                    currentFamilyByteComparable);
+
+    if (qualifierColumnMap != null) {
+
+      String columnName =
+              qualifierColumnMap.get(
+                      new ByteArrayComparable(c.getQualifierArray(),
+                              c.getQualifierOffset(),
+                              c.getQualifierLength()));
+
+      if (columnName != null) {
+        columnToCurrentRowValueMap.put(columnName,
+                new ByteArrayComparable(c.getValueArray(),
+                        c.getValueOffset(), c.getValueLength()));
+      }
+    }
+
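+    //Always include the cell here; the pushed-down predicate itself is
+    //evaluated once per row in filterRow()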
+    return ReturnCode.INCLUDE;
+  }
+
+
+  @Override
+  public boolean filterRow() throws IOException {
+
+    try {
+      boolean result =
+              dynamicLogicExpression.execute(columnToCurrentRowValueMap,
+                      valueFromQueryArray);
+      columnToCurrentRowValueMap = null;
+      return !result;
+    } catch (Throwable e) {
+      log.error("Error running dynamic logic on row", e);
+    }
+    return false;
+  }
+
+
+  /**
+   * @param pbBytes A pb serialized instance
+   * @return An instance of SparkSQLPushDownFilter
+   * @throws DeserializationException if the filter cannot be parsed from the given bytes
+   */
+  @SuppressWarnings("unused")
+  public static SparkSQLPushDownFilter parseFrom(final byte[] pbBytes)
+          throws DeserializationException {
+
+    SparkFilterProtos.SQLPredicatePushDownFilter proto;
+    try {
+      proto = SparkFilterProtos.SQLPredicatePushDownFilter.parseFrom(pbBytes);
+    } catch (InvalidProtocolBufferException e) {
+      throw new DeserializationException(e);
+    }
+
+    String encoder = proto.getEncoderClassName();
+    BytesEncoder enc = JavaBytesEncoder.create(encoder);
+
+    //Load DynamicLogicExpression
+    DynamicLogicExpression dynamicLogicExpression =
+            DynamicLogicExpressionBuilder.build(proto.getDynamicLogicExpression(), enc);
+
+    //Load valuesFromQuery
+    final List<ByteString> valueFromQueryArrayList = proto.getValueFromQueryArrayList();
+    byte[][] valueFromQueryArray = new byte[valueFromQueryArrayList.size()][];
+    for (int i = 0; i < valueFromQueryArrayList.size(); i++) {
+      valueFromQueryArray[i] = valueFromQueryArrayList.get(i).toByteArray();
+    }
+
+    //Load mapping from HBase family/qualifier to Spark SQL columnName
+    HashMap<ByteArrayComparable, HashMap<ByteArrayComparable, String>>
+            currentCellToColumnIndexMap = new HashMap<>();
+
+    for (SparkFilterProtos.SQLPredicatePushDownCellToColumnMapping
+            sqlPredicatePushDownCellToColumnMapping :
+            proto.getCellToColumnMappingList()) {
+
+      byte[] familyArray =
+              sqlPredicatePushDownCellToColumnMapping.getColumnFamily().toByteArray();
+      ByteArrayComparable familyByteComparable =
+              new ByteArrayComparable(familyArray, 0, familyArray.length);
+      HashMap<ByteArrayComparable, String> qualifierMap =
+              currentCellToColumnIndexMap.get(familyByteComparable);
+
+      if (qualifierMap == null) {
+        qualifierMap = new HashMap<>();
+        currentCellToColumnIndexMap.put(familyByteComparable, qualifierMap);
+      }
+      byte[] qualifierArray =
+              sqlPredicatePushDownCellToColumnMapping.getQualifier().toByteArray();
+
+      ByteArrayComparable qualifierByteComparable =
+              new ByteArrayComparable(qualifierArray, 0, qualifierArray.length);
+
+      qualifierMap.put(qualifierByteComparable,
+              sqlPredicatePushDownCellToColumnMapping.getColumnName());
+    }
+
+    return new SparkSQLPushDownFilter(dynamicLogicExpression,
+            valueFromQueryArray, currentCellToColumnIndexMap, encoder);
+  }
+
+  /**
+   * @return The filter serialized using pb
+   */
+  public byte[] toByteArray() {
+
+    SparkFilterProtos.SQLPredicatePushDownFilter.Builder builder =
+            SparkFilterProtos.SQLPredicatePushDownFilter.newBuilder();
+
+    SparkFilterProtos.SQLPredicatePushDownCellToColumnMapping.Builder columnMappingBuilder =
+            SparkFilterProtos.SQLPredicatePushDownCellToColumnMapping.newBuilder();
+
+    builder.setDynamicLogicExpression(dynamicLogicExpression.toExpressionString());
+    for (byte[] valueFromQuery: valueFromQueryArray) {
+      builder.addValueFromQueryArray(ByteStringer.wrap(valueFromQuery));
+    }
+
+    for (Map.Entry<ByteArrayComparable, HashMap<ByteArrayComparable, String>>
+            familyEntry : currentCellToColumnIndexMap.entrySet()) {
+      for (Map.Entry<ByteArrayComparable, String> qualifierEntry :
+              familyEntry.getValue().entrySet()) {
+        columnMappingBuilder.setColumnFamily(
+                ByteStringer.wrap(familyEntry.getKey().bytes()));
+        columnMappingBuilder.setQualifier(
+                ByteStringer.wrap(qualifierEntry.getKey().bytes()));
+        columnMappingBuilder.setColumnName(qualifierEntry.getValue());
+        builder.addCellToColumnMapping(columnMappingBuilder.build());
+      }
+    }
+    builder.setEncoderClassName(encoderClassName);
+
+
+    return builder.build().toByteArray();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof SparkSQLPushDownFilter)) {
+      return false;
+    }
+    if (this == obj) {
+      return true;
+    }
+    SparkSQLPushDownFilter f = (SparkSQLPushDownFilter) obj;
+    if (this.valueFromQueryArray.length != f.valueFromQueryArray.length) {
+      return false;
+    }
+    int i = 0;
+    for (byte[] val : this.valueFromQueryArray) {
+      if (!Bytes.equals(val, f.valueFromQueryArray[i])) {
+        return false;
+      }
+      i++;
+    }
+    return this.dynamicLogicExpression.equals(f.dynamicLogicExpression) &&
+      this.currentCellToColumnIndexMap.equals(f.currentCellToColumnIndexMap) &&
+      this.encoderClassName.equals(f.encoderClassName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.dynamicLogicExpression, Arrays.hashCode(this.valueFromQueryArray),
+      this.currentCellToColumnIndexMap, this.encoderClassName);
+  }
+}
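
A minimal usage sketch for the filter above, assuming a SparkSQLPushDownFilter instance has already been built elsewhere; it only exercises toByteArray() and parseFrom(byte[]) from the file above plus the standard Scan API:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.exceptions.DeserializationException;
    import org.apache.hadoop.hbase.spark.SparkSQLPushDownFilter;

    public class PushDownFilterRoundTrip {
      /** Attach the push-down filter to a Scan; HBase ships it to the servers via toByteArray(). */
      static Scan scanWithFilter(SparkSQLPushDownFilter filter) {
        Scan scan = new Scan();
        scan.setFilter(filter);   // behaves like any other HBase filter from here on
        return scan;
      }

      /** What a region server does with the wire bytes produced by toByteArray(). */
      static SparkSQLPushDownFilter restore(byte[] wire) throws DeserializationException {
        return SparkSQLPushDownFilter.parseFrom(wire);
      }
    }
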
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
new file mode 100644 (file)
index 0000000..8cf2c7f
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This is a simple example of deleting records in HBase
+ * with the bulkDelete function.
+ */
+@InterfaceAudience.Private
+final public class JavaHBaseBulkDeleteExample {
+
+  private JavaHBaseBulkDeleteExample() {}
+
+  public static void main(String[] args) {
+    if (args.length < 1) {
+      System.out.println("JavaHBaseBulkDeleteExample  {tableName}");
+      return;
+    }
+
+    String tableName = args[0];
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkDeleteExample " + tableName);
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+    try {
+      List<byte[]> list = new ArrayList<>(5);
+      list.add(Bytes.toBytes("1"));
+      list.add(Bytes.toBytes("2"));
+      list.add(Bytes.toBytes("3"));
+      list.add(Bytes.toBytes("4"));
+      list.add(Bytes.toBytes("5"));
+
+      JavaRDD<byte[]> rdd = jsc.parallelize(list);
+
+      Configuration conf = HBaseConfiguration.create();
+
+      JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+      hbaseContext.bulkDelete(rdd,
+              TableName.valueOf(tableName), new DeleteFunction(), 4);
+    } finally {
+      jsc.stop();
+    }
+
+  }
+
+  public static class DeleteFunction implements Function<byte[], Delete> {
+    private static final long serialVersionUID = 1L;
+    public Delete call(byte[] v) throws Exception {
+      return new Delete(v);
+    }
+  }
+}
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
new file mode 100644 (file)
index 0000000..b5143de
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This is a simple example of getting records in HBase
+ * with the bulkGet function.
+ */
+@InterfaceAudience.Private
+final public class JavaHBaseBulkGetExample {
+
+  private JavaHBaseBulkGetExample() {}
+
+  public static void main(String[] args) {
+    if (args.length < 1) {
+      System.out.println("JavaHBaseBulkGetExample  {tableName}");
+      return;
+    }
+
+    String tableName = args[0];
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkGetExample " + tableName);
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+    try {
+      List<byte[]> list = new ArrayList<>(5);
+      list.add(Bytes.toBytes("1"));
+      list.add(Bytes.toBytes("2"));
+      list.add(Bytes.toBytes("3"));
+      list.add(Bytes.toBytes("4"));
+      list.add(Bytes.toBytes("5"));
+
+      JavaRDD<byte[]> rdd = jsc.parallelize(list);
+
+      Configuration conf = HBaseConfiguration.create();
+
+      JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+      hbaseContext.bulkGet(TableName.valueOf(tableName), 2, rdd, new GetFunction(),
+              new ResultFunction());
+    } finally {
+      jsc.stop();
+    }
+  }
+
+  public static class GetFunction implements Function<byte[], Get> {
+
+    private static final long serialVersionUID = 1L;
+
+    public Get call(byte[] v) throws Exception {
+      return new Get(v);
+    }
+  }
+
+  public static class ResultFunction implements Function<Result, String> {
+
+    private static final long serialVersionUID = 1L;
+
+    public String call(Result result) throws Exception {
+      Iterator<Cell> it = result.listCells().iterator();
+      StringBuilder b = new StringBuilder();
+
+      b.append(Bytes.toString(result.getRow())).append(":");
+
+      while (it.hasNext()) {
+        Cell cell = it.next();
+        String q = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(),
+                cell.getQualifierLength());
+        if (q.equals("counter")) {
+          b.append("(")
+                  .append(q)
+                  .append(",")
+                  .append(Bytes.toLong(cell.getValueArray(), cell.getValueOffset()))
+                  .append(")");
+        } else {
+          b.append("(")
+                  .append(q)
+                  .append(",")
+                  .append(Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
+                          cell.getValueLength()))
+                  .append(")");
+        }
+      }
+      return b.toString();
+    }
+  }
+}
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
new file mode 100644 (file)
index 0000000..6738059
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.spark.FamilyHFileWriteOptions;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.spark.KeyFamilyQualifier;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Run this example using command below:
+ *
+ *  SPARK_HOME/bin/spark-submit --master local[2]
+ *  --class org.apache.hadoop.hbase.spark.example.hbasecontext.JavaHBaseBulkLoadExample
+ *  path/to/hbase-spark.jar {path/to/output/HFiles}
+ *
+ * This example will write HFiles to {path/to/output/HFiles}, and the user can then run
+ * 'hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles' to load those HFiles into the
+ * table and verify this example.
+ */
+@InterfaceAudience.Private
+final public class JavaHBaseBulkLoadExample {
+  private JavaHBaseBulkLoadExample() {}
+
+  public static void main(String[] args) {
+    if (args.length < 1) {
+      System.out.println("JavaHBaseBulkLoadExample  " + "{outputPath}");
+      return;
+    }
+
+    String tableName = "bulkload-table-test";
+    String columnFamily1 = "f1";
+    String columnFamily2 = "f2";
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkLoadExample " + tableName);
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+    try {
+      List<String> list = new ArrayList<>();
+      // row1
+      list.add("1," + columnFamily1 + ",b,1");
+      // row3
+      list.add("3," + columnFamily1 + ",a,2");
+      list.add("3," + columnFamily1 + ",b,1");
+      list.add("3," + columnFamily2 + ",a,1");
+      /* row2 */
+      list.add("2," + columnFamily2 + ",a,3");
+      list.add("2," + columnFamily2 + ",b,3");
+
+      JavaRDD<String> rdd = jsc.parallelize(list);
+
+      Configuration conf = HBaseConfiguration.create();
+      JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+      hbaseContext.bulkLoad(rdd, TableName.valueOf(tableName), new BulkLoadFunction(), args[0],
+          new HashMap<byte[], FamilyHFileWriteOptions>(), false, HConstants.DEFAULT_MAX_FILE_SIZE);
+    } finally {
+      jsc.stop();
+    }
+  }
+
+  public static class BulkLoadFunction
+          implements Function<String, Pair<KeyFamilyQualifier, byte[]>> {
+    @Override
+    public Pair<KeyFamilyQualifier, byte[]> call(String v1) throws Exception {
+      if (v1 == null) {
+        return null;
+      }
+
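+      // Input lines have the form "rowKey,columnFamily,qualifier,value", as built in main()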
+      String[] strs = v1.split(",");
+      if (strs.length != 4) {
+        return null;
+      }
+
+      KeyFamilyQualifier kfq = new KeyFamilyQualifier(Bytes.toBytes(strs[0]),
+              Bytes.toBytes(strs[1]), Bytes.toBytes(strs[2]));
+      return new Pair<>(kfq, Bytes.toBytes(strs[3]));
+    }
+  }
+}
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
new file mode 100644 (file)
index 0000000..4a80b96
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This is a simple example of putting records in HBase
+ * with the bulkPut function.
+ */
+@InterfaceAudience.Private
+final public class JavaHBaseBulkPutExample {
+
+  private JavaHBaseBulkPutExample() {}
+
+  public static void main(String[] args) {
+    if (args.length < 2) {
+      System.out.println("JavaHBaseBulkPutExample  " +
+              "{tableName} {columnFamily}");
+      return;
+    }
+
+    String tableName = args[0];
+    String columnFamily = args[1];
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkPutExample " + tableName);
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+    try {
+      List<String> list = new ArrayList<>(5);
+      list.add("1," + columnFamily + ",a,1");
+      list.add("2," + columnFamily + ",a,2");
+      list.add("3," + columnFamily + ",a,3");
+      list.add("4," + columnFamily + ",a,4");
+      list.add("5," + columnFamily + ",a,5");
+
+      JavaRDD<String> rdd = jsc.parallelize(list);
+
+      Configuration conf = HBaseConfiguration.create();
+
+      JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+      hbaseContext.bulkPut(rdd,
+              TableName.valueOf(tableName),
+              new PutFunction());
+    } finally {
+      jsc.stop();
+    }
+  }
+
+  public static class PutFunction implements Function<String, Put> {
+
+    private static final long serialVersionUID = 1L;
+
+    public Put call(String v) throws Exception {
+      String[] cells = v.split(",");
+      Put put = new Put(Bytes.toBytes(cells[0]));
+
+      put.addColumn(Bytes.toBytes(cells[1]), Bytes.toBytes(cells[2]),
+              Bytes.toBytes(cells[3]));
+      return put;
+    }
+
+  }
+}
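
For comparison, the same bulkPut flow from Scala, where HBaseContext.bulkPut takes a plain function; a minimal sketch whose table name and sample rows are made up:

    import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
    import org.apache.hadoop.hbase.client.Put
    import org.apache.hadoop.hbase.spark.HBaseContext
    import org.apache.hadoop.hbase.util.Bytes
    import org.apache.spark.{SparkConf, SparkContext}

    val sc = new SparkContext(new SparkConf().setAppName("BulkPutSketch"))
    val hbaseContext = new HBaseContext(sc, HBaseConfiguration.create())
    // records are "rowKey,columnFamily,qualifier,value", as in the Java example above
    val rdd = sc.parallelize(Seq("1,cf1,a,1", "2,cf1,a,2"))
    hbaseContext.bulkPut[String](rdd, TableName.valueOf("exampleTable"), v => {
      val cells = v.split(",")
      new Put(Bytes.toBytes(cells(0)))
        .addColumn(Bytes.toBytes(cells(1)), Bytes.toBytes(cells(2)), Bytes.toBytes(cells(3)))
    })
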
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
new file mode 100644 (file)
index 0000000..0d4f680
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
+import scala.Tuple2;
+
+/**
+ * This is a simple example of scanning records from HBase
+ * with the hbaseRDD function.
+ */
+@InterfaceAudience.Private
+final public class JavaHBaseDistributedScan {
+
+  private JavaHBaseDistributedScan() {}
+
+  public static void main(String[] args) {
+    if (args.length < 1) {
+      System.out.println("JavaHBaseDistributedScan {tableName}");
+      return;
+    }
+
+    String tableName = args[0];
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseDistributedScan " + tableName);
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+    try {
+      Configuration conf = HBaseConfiguration.create();
+
+      JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+      Scan scan = new Scan();
+      scan.setCaching(100);
+
+      JavaRDD<Tuple2<ImmutableBytesWritable, Result>> javaRdd =
+              hbaseContext.hbaseRDD(TableName.valueOf(tableName), scan);
+
+      List<String> results = javaRdd.map(new ScanConvertFunction()).collect();
+
+      System.out.println("Result Size: " + results.size());
+    } finally {
+      jsc.stop();
+    }
+  }
+
+  private static class ScanConvertFunction implements
+          Function<Tuple2<ImmutableBytesWritable, Result>, String> {
+    @Override
+    public String call(Tuple2<ImmutableBytesWritable, Result> v1) throws Exception {
+      return Bytes.toString(v1._1().copyBytes());
+    }
+  }
+}
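
The Scala equivalent of the distributed scan above is a single call to hbaseRDD, which yields an RDD of (ImmutableBytesWritable, Result) pairs; a minimal sketch with a hypothetical table name:

    import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
    import org.apache.hadoop.hbase.client.Scan
    import org.apache.hadoop.hbase.spark.HBaseContext
    import org.apache.hadoop.hbase.util.Bytes
    import org.apache.spark.{SparkConf, SparkContext}

    val sc = new SparkContext(new SparkConf().setAppName("DistributedScanSketch"))
    val hbaseContext = new HBaseContext(sc, HBaseConfiguration.create())
    val scan = new Scan()
    scan.setCaching(100)
    // hbaseRDD returns an RDD[(ImmutableBytesWritable, Result)]
    val rowKeys = hbaseContext.hbaseRDD(TableName.valueOf("exampleTable"), scan)
      .map { case (key, _) => Bytes.toString(key.copyBytes()) }
    println("Result Size: " + rowKeys.count())
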
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
new file mode 100644 (file)
index 0000000..a55d853
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.spark.api.java.function.VoidFunction;
+import org.apache.yetus.audience.InterfaceAudience;
+import scala.Tuple2;
+
+/**
+ * This is a simple example of using the foreachPartition
+ * method with an HBase connection
+ */
+@InterfaceAudience.Private
+final public class JavaHBaseMapGetPutExample {
+
+  private JavaHBaseMapGetPutExample() {}
+
+  public static void main(String[] args) {
+    if (args.length < 1) {
+      System.out.println("JavaHBaseBulkGetExample {tableName}");
+      return;
+    }
+
+    final String tableName = args[0];
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkGetExample " + tableName);
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+    try {
+      List<byte[]> list = new ArrayList<>(5);
+      list.add(Bytes.toBytes("1"));
+      list.add(Bytes.toBytes("2"));
+      list.add(Bytes.toBytes("3"));
+      list.add(Bytes.toBytes("4"));
+      list.add(Bytes.toBytes("5"));
+
+      JavaRDD<byte[]> rdd = jsc.parallelize(list);
+      Configuration conf = HBaseConfiguration.create();
+
+      JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+      hbaseContext.foreachPartition(rdd,
+              new VoidFunction<Tuple2<Iterator<byte[]>, Connection>>() {
+          public void call(Tuple2<Iterator<byte[]>, Connection> t)
+                  throws Exception {
+            Table table = t._2().getTable(TableName.valueOf(tableName));
+            BufferedMutator mutator = t._2().getBufferedMutator(TableName.valueOf(tableName));
+
+            while (t._1().hasNext()) {
+              byte[] b = t._1().next();
+              Result r = table.get(new Get(b));
+              if (r.getExists()) {
+                mutator.mutate(new Put(b));
+              }
+            }
+
+            mutator.flush();
+            mutator.close();
+            table.close();
+          }
+        });
+    } finally {
+      jsc.stop();
+    }
+  }
+
+  public static class GetFunction implements Function<byte[], Get> {
+    private static final long serialVersionUID = 1L;
+    public Get call(byte[] v) throws Exception {
+      return new Get(v);
+    }
+  }
+}
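
In Scala, foreachPartition hands the partition iterator and a shared Connection to a plain function; a sketch of the same get-then-write loop over a hypothetical exampleTable (the marker column it writes back, cf1:visited, is made up, and it checks Result.isEmpty rather than getExists for robustness):

    import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
    import org.apache.hadoop.hbase.client.{Connection, Get, Put}
    import org.apache.hadoop.hbase.spark.HBaseContext
    import org.apache.hadoop.hbase.util.Bytes
    import org.apache.spark.{SparkConf, SparkContext}

    val sc = new SparkContext(new SparkConf().setAppName("MapGetPutSketch"))
    val hbaseContext = new HBaseContext(sc, HBaseConfiguration.create())
    val rdd = sc.parallelize(Seq("1", "2", "3", "4", "5")).map(k => Bytes.toBytes(k))
    hbaseContext.foreachPartition(rdd, (it: Iterator[Array[Byte]], conn: Connection) => {
      val tableName = TableName.valueOf("exampleTable")   // hypothetical table name
      val table = conn.getTable(tableName)
      val mutator = conn.getBufferedMutator(tableName)
      it.foreach { rowKey =>
        // for rows that already exist, write a marker cell back through the mutator
        if (!table.get(new Get(rowKey)).isEmpty) {
          mutator.mutate(new Put(rowKey)
            .addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("visited"), Bytes.toBytes("1")))
        }
      }
      mutator.flush()
      mutator.close()
      table.close()
    })
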
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
new file mode 100644 (file)
index 0000000..74fadc6
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.spark.streaming.Duration;
+import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
+import org.apache.spark.streaming.api.java.JavaStreamingContext;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This is a simple example of BulkPut with Spark Streaming
+ */
+@InterfaceAudience.Private
+final public class JavaHBaseStreamingBulkPutExample {
+
+  private JavaHBaseStreamingBulkPutExample() {}
+
+  public static void main(String[] args) {
+    if (args.length < 3) {
+      System.out.println("JavaHBaseStreamingBulkPutExample " +
+              "{host} {port} {tableName}");
+      return;
+    }
+
+    String host = args[0];
+    String port = args[1];
+    String tableName = args[2];
+
+    SparkConf sparkConf =
+            new SparkConf().setAppName("JavaHBaseStreamingBulkPutExample " +
+                    tableName + ":" + port + ":" + tableName);
+
+    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+    try {
+      JavaStreamingContext jssc =
+              new JavaStreamingContext(jsc, new Duration(1000));
+
+      JavaReceiverInputDStream<String> javaDstream =
+              jssc.socketTextStream(host, Integer.parseInt(port));
+
+      Configuration conf = HBaseConfiguration.create();
+
+      JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+      hbaseContext.streamBulkPut(javaDstream,
+              TableName.valueOf(tableName),
+              new PutFunction());
+    } finally {
+      jsc.stop();
+    }
+  }
+
+  public static class PutFunction implements Function<String, Put> {
+
+    private static final long serialVersionUID = 1L;
+
+    public Put call(String v) throws Exception {
+      String[] part = v.split(",");
+      Put put = new Put(Bytes.toBytes(part[0]));
+
+      put.addColumn(Bytes.toBytes(part[1]),
+              Bytes.toBytes(part[2]),
+              Bytes.toBytes(part[3]));
+      return put;
+    }
+
+  }
+}
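
The Scala streaming counterpart wires a DStream into streamBulkPut the same way; a minimal sketch that assumes a socket text source on localhost:9999 and a hypothetical exampleTable:

    import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
    import org.apache.hadoop.hbase.client.Put
    import org.apache.hadoop.hbase.spark.HBaseContext
    import org.apache.hadoop.hbase.util.Bytes
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.streaming.{Seconds, StreamingContext}

    val sc = new SparkContext(new SparkConf().setAppName("StreamingBulkPutSketch"))
    val ssc = new StreamingContext(sc, Seconds(1))
    val hbaseContext = new HBaseContext(sc, HBaseConfiguration.create())
    // each line on the socket is expected to be "rowKey,columnFamily,qualifier,value"
    val lines = ssc.socketTextStream("localhost", 9999)
    hbaseContext.streamBulkPut[String](lines, TableName.valueOf("exampleTable"), (v: String) => {
      val cells = v.split(",")
      new Put(Bytes.toBytes(cells(0)))
        .addColumn(Bytes.toBytes(cells(1)), Bytes.toBytes(cells(2)), Bytes.toBytes(cells(3)))
    })
    ssc.start()
    ssc.awaitTermination()
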
diff --git a/spark/hbase-spark/src/main/protobuf/SparkFilter.proto b/spark/hbase-spark/src/main/protobuf/SparkFilter.proto
new file mode 100644 (file)
index 0000000..e16c551
--- /dev/null
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are used for Spark filters
+// over in the hbase-spark module
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.spark.protobuf.generated";
+option java_outer_classname = "SparkFilterProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message SQLPredicatePushDownCellToColumnMapping {
+  required bytes column_family = 1;
+  required bytes qualifier = 2;
+  required string column_name = 3;
+}
+
+message SQLPredicatePushDownFilter {
+  required string dynamic_logic_expression = 1;
+  repeated bytes value_from_query_array = 2;
+  repeated SQLPredicatePushDownCellToColumnMapping cell_to_column_mapping = 3;
+  optional string encoderClassName = 4;
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/BulkLoadPartitioner.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/BulkLoadPartitioner.scala
new file mode 100644 (file)
index 0000000..9442c50
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.util
+import java.util.Comparator
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.spark.Partitioner
+
+/**
+ * A Partitioner implementation that will separate records to different
+ * HBase Regions based on region splits
+ *
+ * @param startKeys   The start keys for the given table
+ */
+@InterfaceAudience.Public
+class BulkLoadPartitioner(startKeys:Array[Array[Byte]])
+  extends Partitioner {
+  // when the table does not exist, startKeys is an empty array (Byte[0][])
+  override def numPartitions: Int = if (startKeys.length == 0) 1 else startKeys.length
+
+  override def getPartition(key: Any): Int = {
+
+    val comparator: Comparator[Array[Byte]] = new Comparator[Array[Byte]] {
+      override def compare(o1: Array[Byte], o2: Array[Byte]): Int = {
+        Bytes.compareTo(o1, o2)
+      }
+    }
+
+    val rowKey:Array[Byte] =
+      key match {
+        case qualifier: KeyFamilyQualifier =>
+          qualifier.rowKey
+        case wrapper: ByteArrayWrapper =>
+          wrapper.value
+        case _ =>
+          key.asInstanceOf[Array[Byte]]
+      }
+    var partition = util.Arrays.binarySearch(startKeys, rowKey, comparator)
+    if (partition < 0)
+      partition = partition * -1 + -2
+    if (partition < 0)
+      partition = 0
+    partition
+  }
+}
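
The start keys this partitioner needs come straight from the table's RegionLocator; a short sketch (exampleTable is a made-up name) of building one so that bulk-load records can be repartitioned and sorted by target region:

    import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
    import org.apache.hadoop.hbase.client.ConnectionFactory
    import org.apache.hadoop.hbase.spark.BulkLoadPartitioner

    val conn = ConnectionFactory.createConnection(HBaseConfiguration.create())
    try {
      val startKeys =
        conn.getRegionLocator(TableName.valueOf("exampleTable")).getStartKeys
      val partitioner = new BulkLoadPartitioner(startKeys)
      // an RDD[(KeyFamilyQualifier, Array[Byte])] could now be grouped and ordered
      // by receiving region with rdd.repartitionAndSortWithinPartitions(partitioner)
    } finally {
      conn.close()
    }
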
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala
new file mode 100644 (file)
index 0000000..2d0be38
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Bytes
+
+@InterfaceAudience.Public
+class ByteArrayComparable(val bytes:Array[Byte], val offset:Int = 0, var length:Int = -1)
+  extends Comparable[ByteArrayComparable] {
+
+  if (length == -1) {
+    length = bytes.length
+  }
+
+  override def compareTo(o: ByteArrayComparable): Int = {
+    Bytes.compareTo(bytes, offset, length, o.bytes, o.offset, o.length)
+  }
+
+  override def hashCode(): Int = {
+    Bytes.hashCode(bytes, offset, length)
+  }
+
+  override def equals (obj: Any): Boolean = {
+    obj match {
+      case b: ByteArrayComparable =>
+        Bytes.equals(bytes, offset, length, b.bytes, b.offset, b.length)
+      case _ =>
+        false
+    }
+  }
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayWrapper.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayWrapper.scala
new file mode 100644 (file)
index 0000000..738fa45
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark
+
+import java.io.Serializable
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Bytes
+
+/**
+ * This is a wrapper over a byte array so it can work as
+ * a key in a hashMap
+ *
+ * @param value The Byte Array value
+ */
+@InterfaceAudience.Public
+class ByteArrayWrapper (var value:Array[Byte])
+  extends Comparable[ByteArrayWrapper] with Serializable {
+  override def compareTo(valueOther: ByteArrayWrapper): Int = {
+    Bytes.compareTo(value,valueOther.value)
+  }
+  override def equals(o2: Any): Boolean = {
+    o2 match {
+      case wrapper: ByteArrayWrapper =>
+        Bytes.equals(value, wrapper.value)
+      case _ =>
+        false
+    }
+  }
+  override def hashCode():Int = {
+    Bytes.hashCode(value)
+  }
+}
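
Since equals and hashCode both delegate to Bytes, two wrappers over equal byte content behave as the same map key even though the underlying arrays differ; a small sketch:

    import org.apache.hadoop.hbase.spark.ByteArrayWrapper
    import org.apache.hadoop.hbase.util.Bytes

    import scala.collection.mutable

    val counts = new mutable.HashMap[ByteArrayWrapper, Long]()
    val k1 = new ByteArrayWrapper(Bytes.toBytes("row-1"))
    val k2 = new ByteArrayWrapper(Bytes.toBytes("row-1"))   // same content, different array
    counts(k1) = 1L
    counts(k2) = counts.getOrElse(k2, 0L) + 1L
    assert(counts.size == 1 && counts(k1) == 2L)
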
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ColumnFamilyQualifierMapKeyWrapper.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ColumnFamilyQualifierMapKeyWrapper.scala
new file mode 100644 (file)
index 0000000..3037001
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Bytes
+
+/**
+ * A wrapper class that will allow both columnFamily and qualifier to
+ * be the key of a hashMap.  Also allows for finding the value in a hashMap
+ * without cloning the HBase value from the HBase Cell object
+ * @param columnFamily       ColumnFamily byte array
+ * @param columnFamilyOffSet Offset of columnFamily value in the array
+ * @param columnFamilyLength Length of the columnFamily value in the columnFamily array
+ * @param qualifier          Qualifier byte array
+ * @param qualifierOffSet    Offset of qualifier value in the array
+ * @param qualifierLength    Length of the qualifier value with in the array
+ */
+@InterfaceAudience.Public
+class ColumnFamilyQualifierMapKeyWrapper(val columnFamily:Array[Byte],
+                                         val columnFamilyOffSet:Int,
+                                         val columnFamilyLength:Int,
+                                         val qualifier:Array[Byte],
+                                         val qualifierOffSet:Int,
+                                         val qualifierLength:Int)
+  extends Serializable{
+
+  override def equals(other:Any): Boolean = {
+    val otherWrapper = other.asInstanceOf[ColumnFamilyQualifierMapKeyWrapper]
+
+    Bytes.compareTo(columnFamily,
+      columnFamilyOffSet,
+      columnFamilyLength,
+      otherWrapper.columnFamily,
+      otherWrapper.columnFamilyOffSet,
+      otherWrapper.columnFamilyLength) == 0 && Bytes.compareTo(qualifier,
+        qualifierOffSet,
+        qualifierLength,
+        otherWrapper.qualifier,
+        otherWrapper.qualifierOffSet,
+        otherWrapper.qualifierLength) == 0
+  }
+
+  override def hashCode():Int = {
+    Bytes.hashCode(columnFamily, columnFamilyOffSet, columnFamilyLength) +
+      Bytes.hashCode(qualifier, qualifierOffSet, qualifierLength)
+  }
+
+  def cloneColumnFamily():Array[Byte] = {
+    val resultArray = new Array[Byte](columnFamilyLength)
+    System.arraycopy(columnFamily, columnFamilyOffSet, resultArray, 0, columnFamilyLength)
+    resultArray
+  }
+
+  def cloneQualifier():Array[Byte] = {
+    val resultArray = new Array[Byte](qualifierLength)
+    System.arraycopy(qualifier, qualifierOffSet, resultArray, 0, qualifierLength)
+    resultArray
+  }
+}
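
The offsets and lengths let a wrapper be built directly over a Cell's backing arrays, so nothing is copied until cloneColumnFamily or cloneQualifier is actually called; a sketch of wrapping an arbitrary cell (the helper name wrapperFor is made up):

    import org.apache.hadoop.hbase.Cell
    import org.apache.hadoop.hbase.spark.ColumnFamilyQualifierMapKeyWrapper

    def wrapperFor(cell: Cell): ColumnFamilyQualifierMapKeyWrapper =
      new ColumnFamilyQualifierMapKeyWrapper(
        cell.getFamilyArray, cell.getFamilyOffset, cell.getFamilyLength.toInt,
        cell.getQualifierArray, cell.getQualifierOffset, cell.getQualifierLength)

    // cloneColumnFamily()/cloneQualifier() copy the bytes only when a caller
    // needs a standalone array, e.g. to build a fully qualified column name.
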
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
new file mode 100644 (file)
index 0000000..4e05695
--- /dev/null
@@ -0,0 +1,1222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.util
+import java.util.concurrent.ConcurrentLinkedQueue
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client._
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.hadoop.hbase.mapred.TableOutputFormat
+import org.apache.hadoop.hbase.spark.datasources._
+import org.apache.hadoop.hbase.types._
+import org.apache.hadoop.hbase.util.{Bytes, PositionedByteRange, SimplePositionedMutableByteRange}
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.HTableDescriptor
+import org.apache.hadoop.hbase.HColumnDescriptor
+import org.apache.hadoop.hbase.TableName
+import org.apache.hadoop.hbase.CellUtil
+import org.apache.hadoop.mapred.JobConf
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.{DataFrame, SaveMode, Row, SQLContext}
+import org.apache.spark.sql.sources._
+import org.apache.spark.sql.types._
+
+import scala.collection.mutable
+
+/**
+ * DefaultSource for integration with Spark's dataframe datasources.
+ * This class will produce a relationProvider based on input given to it from spark
+ *
+ * This class needs to stay in the current package 'org.apache.hadoop.hbase.spark'
+ * for Spark to match the hbase data source name.
+ *
+ * In all, this DefaultSource supports the following datasource functionality:
+ * - Scan range pruning through filter push down logic based on rowKeys
+ * - Filter push down logic on HBase Cells
+ * - Qualifier filtering based on columns used in the SparkSQL statement
+ * - Type conversions of basic SQL types.  All conversions go
+ *   through the HBase Bytes utility methods.
+ */
+@InterfaceAudience.Private
+class DefaultSource extends RelationProvider  with CreatableRelationProvider with Logging {
+  /**
+   * Is given input from SparkSQL to construct a BaseRelation
+   *
+   * @param sqlContext SparkSQL context
+   * @param parameters Parameters given to us from SparkSQL
+   * @return           A BaseRelation Object
+   */
+  override def createRelation(sqlContext: SQLContext,
+                              parameters: Map[String, String]):
+  BaseRelation = {
+    new HBaseRelation(parameters, None)(sqlContext)
+  }
+
+
+  override def createRelation(
+      sqlContext: SQLContext,
+      mode: SaveMode,
+      parameters: Map[String, String],
+      data: DataFrame): BaseRelation = {
+    val relation = HBaseRelation(parameters, Some(data.schema))(sqlContext)
+    relation.createTable()
+    relation.insert(data, false)
+    relation
+  }
+}
+
+/**
+ * Implementation of Spark BaseRelation that will build up our scan logic,
+ * do the scan pruning, filter push down, and value conversions
+ *
+ * @param sqlContext SparkSQL context
+ */
+@InterfaceAudience.Private
+case class HBaseRelation (
+    @transient parameters: Map[String, String],
+    userSpecifiedSchema: Option[StructType]
+  )(@transient val sqlContext: SQLContext)
+  extends BaseRelation with PrunedFilteredScan  with InsertableRelation  with Logging {
+  val timestamp = parameters.get(HBaseSparkConf.TIMESTAMP).map(_.toLong)
+  val minTimestamp = parameters.get(HBaseSparkConf.TIMERANGE_START).map(_.toLong)
+  val maxTimestamp = parameters.get(HBaseSparkConf.TIMERANGE_END).map(_.toLong)
+  val maxVersions = parameters.get(HBaseSparkConf.MAX_VERSIONS).map(_.toInt)
+  val encoderClsName = parameters.get(HBaseSparkConf.QUERY_ENCODER).getOrElse(HBaseSparkConf.DEFAULT_QUERY_ENCODER)
+
+  @transient val encoder = JavaBytesEncoder.create(encoderClsName)
+
+  val catalog = HBaseTableCatalog(parameters)
+  def tableName = catalog.name
+  val configResources = parameters.get(HBaseSparkConf.HBASE_CONFIG_LOCATION)
+  val useHBaseContext =  parameters.get(HBaseSparkConf.USE_HBASECONTEXT).map(_.toBoolean).getOrElse(HBaseSparkConf.DEFAULT_USE_HBASECONTEXT)
+  val usePushDownColumnFilter = parameters.get(HBaseSparkConf.PUSHDOWN_COLUMN_FILTER)
+    .map(_.toBoolean).getOrElse(HBaseSparkConf.DEFAULT_PUSHDOWN_COLUMN_FILTER)
+
+  // The user supplied per table parameter will overwrite global ones in SparkConf
+  val blockCacheEnable = parameters.get(HBaseSparkConf.QUERY_CACHEBLOCKS).map(_.toBoolean)
+    .getOrElse(
+      sqlContext.sparkContext.getConf.getBoolean(
+        HBaseSparkConf.QUERY_CACHEBLOCKS, HBaseSparkConf.DEFAULT_QUERY_CACHEBLOCKS))
+  val cacheSize = parameters.get(HBaseSparkConf.QUERY_CACHEDROWS).map(_.toInt)
+    .getOrElse(
+      sqlContext.sparkContext.getConf.getInt(
+      HBaseSparkConf.QUERY_CACHEDROWS, -1))
+  val batchNum = parameters.get(HBaseSparkConf.QUERY_BATCHSIZE).map(_.toInt)
+    .getOrElse(sqlContext.sparkContext.getConf.getInt(
+    HBaseSparkConf.QUERY_BATCHSIZE,  -1))
+
+  val bulkGetSize =  parameters.get(HBaseSparkConf.BULKGET_SIZE).map(_.toInt)
+    .getOrElse(sqlContext.sparkContext.getConf.getInt(
+    HBaseSparkConf.BULKGET_SIZE,  HBaseSparkConf.DEFAULT_BULKGET_SIZE))
+
+  //create or get latest HBaseContext
+  val hbaseContext:HBaseContext = if (useHBaseContext) {
+    LatestHBaseContextCache.latest
+  } else {
+    val config = HBaseConfiguration.create()
+    configResources.map(resource => resource.split(",").foreach(r => config.addResource(r)))
+    new HBaseContext(sqlContext.sparkContext, config)
+  }
+
+  val wrappedConf = new SerializableConfiguration(hbaseContext.config)
+  def hbaseConf = wrappedConf.value
+
+  /**
+   * Generates a Spark SQL schema object so Spark SQL knows what is being
+   * provided by this BaseRelation
+   *
+   * @return schema generated from the SCHEMA_COLUMNS_MAPPING_KEY value
+   */
+  override val schema: StructType = userSpecifiedSchema.getOrElse(catalog.toDataType)
+
+
+
+  def createTable() {
+    val numReg = parameters.get(HBaseTableCatalog.newTable).map(x => x.toInt).getOrElse(0)
+    val startKey =  Bytes.toBytes(
+      parameters.get(HBaseTableCatalog.regionStart)
+        .getOrElse(HBaseTableCatalog.defaultRegionStart))
+    val endKey = Bytes.toBytes(
+      parameters.get(HBaseTableCatalog.regionEnd)
+        .getOrElse(HBaseTableCatalog.defaultRegionEnd))
+    if (numReg > 3) {
+      val tName = TableName.valueOf(catalog.name)
+      val cfs = catalog.getColumnFamilies
+
+      val connection = HBaseConnectionCache.getConnection(hbaseConf)
+      // Initialize hBase table if necessary
+      val admin = connection.getAdmin
+      try {
+        if (!admin.isTableAvailable(tName)) {
+          val tableDesc = new HTableDescriptor(tName)
+          cfs.foreach { x =>
+            val cf = new HColumnDescriptor(x.getBytes())
+            logDebug(s"add family $x to ${catalog.name}")
+            tableDesc.addFamily(cf)
+          }
+          val splitKeys = Bytes.split(startKey, endKey, numReg);
+          admin.createTable(tableDesc, splitKeys)
+
+        }
+      }finally {
+        admin.close()
+        connection.close()
+      }
+    } else {
+      logInfo(
+        s"""${HBaseTableCatalog.newTable}
+           |is not defined or not larger than 3, skipping table creation""".stripMargin)
+    }
+  }
+
+  /**
+   * Writes the given DataFrame to the HBase table described by the catalog.
+   * @param data      the DataFrame to write
+   * @param overwrite ignored; rows are always written as HBase puts
+   */
+  override def insert(data: DataFrame, overwrite: Boolean): Unit = {
+    val jobConfig: JobConf = new JobConf(hbaseConf, this.getClass)
+    jobConfig.setOutputFormat(classOf[TableOutputFormat])
+    jobConfig.set(TableOutputFormat.OUTPUT_TABLE, catalog.name)
+    var count = 0
+    val rkFields = catalog.getRowKey
+    val rkIdxedFields = rkFields.map{ case x =>
+      (schema.fieldIndex(x.colName), x)
+    }
+    val colsIdxedFields = schema
+      .fieldNames
+      .partition( x => rkFields.map(_.colName).contains(x))
+      ._2.map(x => (schema.fieldIndex(x), catalog.getField(x)))
+    val rdd = data.rdd
+    def convertToPut(row: Row) = {
+      // construct bytes for row key
+      val rowBytes = rkIdxedFields.map { case (x, y) =>
+        Utils.toBytes(row(x), y)
+      }
+      val rLen = rowBytes.foldLeft(0) { case (x, y) =>
+        x + y.length
+      }
+      val rBytes = new Array[Byte](rLen)
+      var offset = 0
+      rowBytes.foreach { x =>
+        System.arraycopy(x, 0, rBytes, offset, x.length)
+        offset += x.length
+      }
+      val put = timestamp.fold(new Put(rBytes))(new Put(rBytes, _))
+
+      colsIdxedFields.foreach { case (x, y) =>
+        val b = Utils.toBytes(row(x), y)
+        put.addColumn(Bytes.toBytes(y.cf), Bytes.toBytes(y.col), b)
+      }
+      count += 1
+      (new ImmutableBytesWritable, put)
+    }
+    rdd.map(convertToPut(_)).saveAsHadoopDataset(jobConfig)
+  }
+
+  def getIndexedProjections(requiredColumns: Array[String]): Seq[(Field, Int)] = {
+    requiredColumns.map(catalog.sMap.getField(_)).zipWithIndex
+  }
+
+
+  /**
+    * Takes an HBase row key and parses all of the fields from it.
+    * This is independent of which fields were requested from the key;
+    * because we have all the data, it is less complex to parse everything.
+    *
+    * @param row the retrieved row from hbase.
+    * @param keyFields all of the fields in the row key, ORDERED by their order in the row key.
+    */
+  def parseRowKey(row: Array[Byte], keyFields: Seq[Field]): Map[Field, Any] = {
+    keyFields.foldLeft((0, Seq[(Field, Any)]()))((state, field) => {
+      val idx = state._1
+      val parsed = state._2
+      if (field.length != -1) {
+        val value = Utils.hbaseFieldToScalaType(field, row, idx, field.length)
+        // Return the new index and appended value
+        (idx + field.length, parsed ++ Seq((field, value)))
+      } else {
+        field.dt match {
+          case StringType =>
+            val pos = row.indexOf(HBaseTableCatalog.delimiter, idx)
+            if (pos == -1 || pos > row.length) {
+              // this is at the last dimension
+              val value = Utils.hbaseFieldToScalaType(field, row, idx, row.length)
+              (row.length + 1, parsed ++ Seq((field, value)))
+            } else {
+              val value = Utils.hbaseFieldToScalaType(field, row, idx, pos - idx)
+              (pos, parsed ++ Seq((field, value)))
+            }
+          // We don't know the length, assume it extends to the end of the rowkey.
+          case _ => (row.length + 1, parsed ++ Seq((field, Utils.hbaseFieldToScalaType(field, row, idx, row.length))))
+        }
+      }
+    })._2.toMap
+  }
+
+  def buildRow(fields: Seq[Field], result: Result): Row = {
+    val r = result.getRow
+    val keySeq = parseRowKey(r, catalog.getRowKey)
+    val valueSeq = fields.filter(!_.isRowKey).map { x =>
+      val kv = result.getColumnLatestCell(Bytes.toBytes(x.cf), Bytes.toBytes(x.col))
+      if (kv == null || kv.getValueLength == 0) {
+        (x, null)
+      } else {
+        val v = CellUtil.cloneValue(kv)
+        (x, x.dt match {
+          // Here, to avoid arraycopy, return v directly instead of calling hbaseFieldToScalaType
+          case BinaryType => v
+          case _ => Utils.hbaseFieldToScalaType(x, v, 0, v.length)
+        })
+      }
+    }.toMap
+    val unionedRow = keySeq ++ valueSeq
+    // Return the row ordered by the requested order
+    Row.fromSeq(fields.map(unionedRow.get(_).getOrElse(null)))
+  }
+
+  /**
+   * Here we are building the functionality to populate the resulting RDD[Row]
+   * Here is where we will do the following:
+   * - Filter push down
+   * - Scan or GetList pruning
+   * - Executing our scan(s) or/and GetList to generate result
+   *
+   * @param requiredColumns The columns that are being requested by the requesting query
+   * @param filters         The filters that are being applied by the requesting query
+   * @return                RDD with all the results from HBase needed for SparkSQL to
+   *                        execute the query
+   */
+  override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
+
+    val pushDownTuple = buildPushDownPredicatesResource(filters)
+    val pushDownRowKeyFilter = pushDownTuple._1
+    var pushDownDynamicLogicExpression = pushDownTuple._2
+    val valueArray = pushDownTuple._3
+
+    if (!usePushDownColumnFilter) {
+      pushDownDynamicLogicExpression = null
+    }
+
+    logDebug("pushDownRowKeyFilter:           " + pushDownRowKeyFilter.ranges)
+    if (pushDownDynamicLogicExpression != null) {
+      logDebug("pushDownDynamicLogicExpression: " +
+        pushDownDynamicLogicExpression.toExpressionString)
+    }
+    logDebug("valueArray:                     " + valueArray.length)
+
+    val requiredQualifierDefinitionList =
+      new mutable.MutableList[Field]
+
+    requiredColumns.foreach( c => {
+      val field = catalog.getField(c)
+      requiredQualifierDefinitionList += field
+    })
+
+    //retain the information for unit testing checks
+    DefaultSourceStaticUtils.populateLatestExecutionRules(pushDownRowKeyFilter,
+      pushDownDynamicLogicExpression)
+
+    val getList = new util.ArrayList[Get]()
+    val rddList = new util.ArrayList[RDD[Row]]()
+
+    //add points to getList
+    pushDownRowKeyFilter.points.foreach(p => {
+      val get = new Get(p)
+      requiredQualifierDefinitionList.foreach( d => {
+        if (d.isRowKey)
+          get.addColumn(d.cfBytes, d.colBytes)
+      })
+      getList.add(get)
+    })
+
+    val pushDownFilterJava = if (usePushDownColumnFilter && pushDownDynamicLogicExpression != null) {
+        Some(new SparkSQLPushDownFilter(pushDownDynamicLogicExpression,
+          valueArray, requiredQualifierDefinitionList, encoderClsName))
+    } else {
+      None
+    }
+    val hRdd = new HBaseTableScanRDD(this, hbaseContext, pushDownFilterJava, requiredQualifierDefinitionList.seq)
+    pushDownRowKeyFilter.points.foreach(hRdd.addPoint(_))
+    pushDownRowKeyFilter.ranges.foreach(hRdd.addRange(_))
+
+    var resultRDD: RDD[Row] = {
+      val tmp = hRdd.map{ r =>
+        val indexedFields = getIndexedProjections(requiredColumns).map(_._1)
+        buildRow(indexedFields, r)
+
+      }
+      if (tmp.partitions.size > 0) {
+        tmp
+      } else {
+        null
+      }
+    }
+
+    if (resultRDD == null) {
+      val scan = new Scan()
+      scan.setCacheBlocks(blockCacheEnable)
+      scan.setBatch(batchNum)
+      scan.setCaching(cacheSize)
+      requiredQualifierDefinitionList.foreach( d =>
+        scan.addColumn(d.cfBytes, d.colBytes))
+
+      val rdd = hbaseContext.hbaseRDD(TableName.valueOf(tableName), scan).map(r => {
+        val indexedFields = getIndexedProjections(requiredColumns).map(_._1)
+        buildRow(indexedFields, r._2)
+      })
+      resultRDD=rdd
+    }
+    resultRDD
+  }
+
+  def buildPushDownPredicatesResource(filters: Array[Filter]):
+  (RowKeyFilter, DynamicLogicExpression, Array[Array[Byte]]) = {
+    var superRowKeyFilter:RowKeyFilter = null
+    val queryValueList = new mutable.MutableList[Array[Byte]]
+    var superDynamicLogicExpression: DynamicLogicExpression = null
+
+    filters.foreach( f => {
+      val rowKeyFilter = new RowKeyFilter()
+      val logicExpression = transverseFilterTree(rowKeyFilter, queryValueList, f)
+      if (superDynamicLogicExpression == null) {
+        superDynamicLogicExpression = logicExpression
+        superRowKeyFilter = rowKeyFilter
+      } else {
+        superDynamicLogicExpression =
+          new AndLogicExpression(superDynamicLogicExpression, logicExpression)
+        superRowKeyFilter.mergeIntersect(rowKeyFilter)
+      }
+
+    })
+
+    val queryValueArray = queryValueList.toArray
+
+    if (superRowKeyFilter == null) {
+      superRowKeyFilter = new RowKeyFilter
+    }
+
+    (superRowKeyFilter, superDynamicLogicExpression, queryValueArray)
+  }
+
+  /**
+    * For some codecs, the ordering of a java primitive type and the ordering of
+    * its byte-array encoding may be inconsistent. We may have to split a predicate
+    * on such a type into multiple predicates; the encoder takes care of this and
+    * returns the concrete ranges.
+    *
+    * For example, with the naive codec some java primitive types have to be split
+    * into multiple predicates that are then unioned so the predicate is evaluated
+    * correctly. If we have "COLUMN < 2", we will transform it into
+    * "0 <= COLUMN < 2 OR Integer.MIN_VALUE <= COLUMN <= -1"
+    */
+
+  def transverseFilterTree(parentRowKeyFilter:RowKeyFilter,
+                                  valueArray:mutable.MutableList[Array[Byte]],
+                                  filter:Filter): DynamicLogicExpression = {
+    filter match {
+      case EqualTo(attr, value) =>
+        val field = catalog.getField(attr)
+        if (field != null) {
+          if (field.isRowKey) {
+            parentRowKeyFilter.mergeIntersect(new RowKeyFilter(
+              DefaultSourceStaticUtils.getByteValue(field,
+                value.toString), null))
+          }
+          val byteValue =
+            DefaultSourceStaticUtils.getByteValue(field, value.toString)
+          valueArray += byteValue
+        }
+        new EqualLogicExpression(attr, valueArray.length - 1, false)
+
+      /**
+        * The encoder may split the predicate into multiple byte array boundaries.
+        * Each boundary is mapped into a RowKeyFilter and then unioned by the reduce
+        * operation. If the data type is not supported, b will be None and no
+        * operation happens on the parentRowKeyFilter.
+        *
+        * Note that because LessThan is not inclusive, the first bound should be exclusive,
+        * which is controlled by inc.
+        *
+        * The other predicates, i.e., GreaterThan/LessThanOrEqual/GreaterThanOrEqual, follow
+        * similar logic.
+        */
+      case LessThan(attr, value) =>
+        val field = catalog.getField(attr)
+        if (field != null) {
+          if (field.isRowKey) {
+            val b = encoder.ranges(value)
+            var inc = false
+            b.map(_.less.map { x =>
+              val r = new RowKeyFilter(null,
+                new ScanRange(x.upper, inc, x.low, true)
+              )
+              inc = true
+              r
+            }).map { x =>
+              x.reduce { (i, j) =>
+                i.mergeUnion(j)
+              }
+            }.map(parentRowKeyFilter.mergeIntersect(_))
+          }
+          val byteValue = encoder.encode(field.dt, value)
+          valueArray += byteValue
+        }
+        new LessThanLogicExpression(attr, valueArray.length - 1)
+      case GreaterThan(attr, value) =>
+        val field = catalog.getField(attr)
+        if (field != null) {
+          if (field.isRowKey) {
+            val b = encoder.ranges(value)
+            var inc = false
+            b.map(_.greater.map{x =>
+              val r = new RowKeyFilter(null,
+                new ScanRange(x.upper, true, x.low, inc))
+              inc = true
+              r
+            }).map { x =>
+              x.reduce { (i, j) =>
+                i.mergeUnion(j)
+              }
+            }.map(parentRowKeyFilter.mergeIntersect(_))
+          }
+          val byteValue = encoder.encode(field.dt, value)
+          valueArray += byteValue
+        }
+        new GreaterThanLogicExpression(attr, valueArray.length - 1)
+      case LessThanOrEqual(attr, value) =>
+        val field = catalog.getField(attr)
+        if (field != null) {
+          if (field.isRowKey) {
+            val b = encoder.ranges(value)
+            b.map(_.less.map(x =>
+              new RowKeyFilter(null,
+                new ScanRange(x.upper, true, x.low, true))))
+              .map { x =>
+                x.reduce{ (i, j) =>
+                  i.mergeUnion(j)
+                }
+              }.map(parentRowKeyFilter.mergeIntersect(_))
+          }
+          val byteValue = encoder.encode(field.dt, value)
+          valueArray += byteValue
+        }
+        new LessThanOrEqualLogicExpression(attr, valueArray.length - 1)
+      case GreaterThanOrEqual(attr, value) =>
+        val field = catalog.getField(attr)
+        if (field != null) {
+          if (field.isRowKey) {
+            val b = encoder.ranges(value)
+            b.map(_.greater.map(x =>
+              new RowKeyFilter(null,
+                new ScanRange(x.upper, true, x.low, true))))
+              .map { x =>
+                x.reduce { (i, j) =>
+                  i.mergeUnion(j)
+                }
+              }.map(parentRowKeyFilter.mergeIntersect(_))
+          }
+          val byteValue = encoder.encode(field.dt, value)
+          valueArray += byteValue
+        }
+        new GreaterThanOrEqualLogicExpression(attr, valueArray.length - 1)
+      case Or(left, right) =>
+        val leftExpression = transverseFilterTree(parentRowKeyFilter, valueArray, left)
+        val rightSideRowKeyFilter = new RowKeyFilter
+        val rightExpression = transverseFilterTree(rightSideRowKeyFilter, valueArray, right)
+
+        parentRowKeyFilter.mergeUnion(rightSideRowKeyFilter)
+
+        new OrLogicExpression(leftExpression, rightExpression)
+      case And(left, right) =>
+
+        val leftExpression = transverseFilterTree(parentRowKeyFilter, valueArray, left)
+        val rightSideRowKeyFilter = new RowKeyFilter
+        val rightExpression = transverseFilterTree(rightSideRowKeyFilter, valueArray, right)
+        parentRowKeyFilter.mergeIntersect(rightSideRowKeyFilter)
+
+        new AndLogicExpression(leftExpression, rightExpression)
+      case IsNull(attr) =>
+        new IsNullLogicExpression(attr, false)
+      case IsNotNull(attr) =>
+        new IsNullLogicExpression(attr, true)
+      case _ =>
+        new PassThroughLogicExpression
+    }
+  }
+}
+
+/**
+ * Construct to contain a single scan range's information.  Also
+ * provide functions to merge with other scan ranges through AND
+ * or OR operators
+ *
+ * @param upperBound          Upper bound of scan
+ * @param isUpperBoundEqualTo Include upper bound value in the results
+ * @param lowerBound          Lower bound of scan
+ * @param isLowerBoundEqualTo Include lower bound value in the results
+ */
+@InterfaceAudience.Private
+class ScanRange(var upperBound:Array[Byte], var isUpperBoundEqualTo:Boolean,
+                var lowerBound:Array[Byte], var isLowerBoundEqualTo:Boolean)
+  extends Serializable {
+
+  /**
+   * Function to merge another scan object through an AND operation
+   *
+   * @param other Other scan object
+   */
+  def mergeIntersect(other:ScanRange): Unit = {
+    val upperBoundCompare = compareRange(upperBound, other.upperBound)
+    val lowerBoundCompare = compareRange(lowerBound, other.lowerBound)
+
+    upperBound = if (upperBoundCompare <0) upperBound else other.upperBound
+    lowerBound = if (lowerBoundCompare >0) lowerBound else other.lowerBound
+
+    isLowerBoundEqualTo = if (lowerBoundCompare == 0)
+      isLowerBoundEqualTo && other.isLowerBoundEqualTo
+    else isLowerBoundEqualTo
+
+    isUpperBoundEqualTo = if (upperBoundCompare == 0)
+      isUpperBoundEqualTo && other.isUpperBoundEqualTo
+    else isUpperBoundEqualTo
+  }
+
+  /**
+   * Function to merge another scan object through an OR operation
+   *
+   * @param other Other scan object
+   */
+  def mergeUnion(other:ScanRange): Unit = {
+
+    val upperBoundCompare = compareRange(upperBound, other.upperBound)
+    val lowerBoundCompare = compareRange(lowerBound, other.lowerBound)
+
+    upperBound = if (upperBoundCompare >0) upperBound else other.upperBound
+    lowerBound = if (lowerBoundCompare <0) lowerBound else other.lowerBound
+
+    isLowerBoundEqualTo = if (lowerBoundCompare == 0)
+      isLowerBoundEqualTo || other.isLowerBoundEqualTo
+    else if (lowerBoundCompare < 0) isLowerBoundEqualTo else other.isLowerBoundEqualTo
+
+    isUpperBoundEqualTo = if (upperBoundCompare == 0)
+      isUpperBoundEqualTo || other.isUpperBoundEqualTo
+    else if (upperBoundCompare < 0) other.isUpperBoundEqualTo else isUpperBoundEqualTo
+  }
+
+  /**
+   * Common function to see if this scan overlaps with another
+   *
+   * Reference Visual
+   *
+   * A                           B
+   * |---------------------------|
+   *   LL--------------LU
+   *        RL--------------RU
+   *
+   * A = lowest value is byte[0]
+   * B = highest value is null
+   * LL = Left Lower Bound
+   * LU = Left Upper Bound
+   * RL = Right Lower Bound
+   * RU = Right Upper Bound
+   *
+   * @param other Other scan object
+   * @return      The overlapping ScanRange if the two ranges overlap, otherwise null
+   */
+  def getOverLapScanRange(other:ScanRange): ScanRange = {
+
+    var leftRange:ScanRange = null
+    var rightRange:ScanRange = null
+
+    // First identify the Left range
+    // Also lower bound can't be null
+    if (compareRange(lowerBound, other.lowerBound) < 0 ||
+      compareRange(upperBound, other.upperBound) < 0) {
+      leftRange = this
+      rightRange = other
+    } else {
+      leftRange = other
+      rightRange = this
+    }
+
+    if (hasOverlap(leftRange, rightRange)) {
+      // Find the upper bound and lower bound
+      if (compareRange(leftRange.upperBound, rightRange.upperBound) >= 0) {
+        new ScanRange(rightRange.upperBound, rightRange.isUpperBoundEqualTo,
+          rightRange.lowerBound, rightRange.isLowerBoundEqualTo)
+      } else {
+        new ScanRange(leftRange.upperBound, leftRange.isUpperBoundEqualTo,
+          rightRange.lowerBound, rightRange.isLowerBoundEqualTo)
+      }
+    } else {
+      null
+    }
+  }
+
+  /**
+    * The leftRange.upperBound has to be larger than the rightRange's lowerBound.
+    * Otherwise, there is no overlap.
+    *
+    * @param left  The range with the smaller lowerBound
+    * @param right The range with the larger lowerBound
+    * @return Whether two ranges have overlap.
+    */
+
+  def hasOverlap(left: ScanRange, right: ScanRange): Boolean = {
+    compareRange(left.upperBound, right.lowerBound) >= 0
+  }
+
+  /**
+   * Special compare logic because we can have null values
+   * for left or right bound
+   *
+   * @param left  Left byte array
+   * @param right Right byte array
+   * @return      0 if equal, 1 if left is greater, -1 if right is greater
+   */
+  def compareRange(left:Array[Byte], right:Array[Byte]): Int = {
+    if (left == null && right == null) 0
+    else if (left == null && right != null) 1
+    else if (left != null && right == null) -1
+    else Bytes.compareTo(left, right)
+  }
+
+  /**
+   * Checks whether the given point falls within this scan range.
+   * @return true if the point is inside the range, false otherwise
+   */
+  def containsPoint(point:Array[Byte]): Boolean = {
+    val lowerCompare = compareRange(point, lowerBound)
+    val upperCompare = compareRange(point, upperBound)
+
+    ((isLowerBoundEqualTo && lowerCompare >= 0) ||
+      (!isLowerBoundEqualTo && lowerCompare > 0)) &&
+      ((isUpperBoundEqualTo && upperCompare <= 0) ||
+        (!isUpperBoundEqualTo && upperCompare < 0))
+
+  }
+  override def toString:String = {
+    "ScanRange:(upperBound:" + Bytes.toString(upperBound) +
+      ",isUpperBoundEqualTo:" + isUpperBoundEqualTo + ",lowerBound:" +
+      Bytes.toString(lowerBound) + ",isLowerBoundEqualTo:" + isLowerBoundEqualTo + ")"
+  }
+}
+
+/**
+ * Contains information related to the filters for a given column.
+ * This can contain many ranges or points.
+ *
+ * @param currentPoint the initial point when the filter is created
+ * @param currentRange the initial scanRange when the filter is created
+ */
+@InterfaceAudience.Private
+class ColumnFilter (currentPoint:Array[Byte] = null,
+                     currentRange:ScanRange = null,
+                     var points:mutable.MutableList[Array[Byte]] =
+                     new mutable.MutableList[Array[Byte]](),
+                     var ranges:mutable.MutableList[ScanRange] =
+                     new mutable.MutableList[ScanRange]() ) extends Serializable {
+  //Collection of ranges
+  if (currentRange != null ) ranges.+=(currentRange)
+
+  //Collection of points
+  if (currentPoint != null) points.+=(currentPoint)
+
+  /**
+   * This will validate a given value against the filter's points and/or ranges;
+   * the result indicates whether the value passes the filter
+   *
+   * @param value       Value to be validated
+   * @param valueOffSet The offset of the value
+   * @param valueLength The length of the value
+   * @return            True if the value passes the filter, false if not
+   */
+  def validate(value:Array[Byte], valueOffSet:Int, valueLength:Int):Boolean = {
+    var result = false
+
+    points.foreach( p => {
+      if (Bytes.equals(p, 0, p.length, value, valueOffSet, valueLength)) {
+        result = true
+      }
+    })
+
+    ranges.foreach( r => {
+      val upperBoundPass = r.upperBound == null ||
+        (r.isUpperBoundEqualTo &&
+          Bytes.compareTo(r.upperBound, 0, r.upperBound.length,
+            value, valueOffSet, valueLength) >= 0) ||
+        (!r.isUpperBoundEqualTo &&
+          Bytes.compareTo(r.upperBound, 0, r.upperBound.length,
+            value, valueOffSet, valueLength) > 0)
+
+      val lowerBoundPass = r.lowerBound == null || r.lowerBound.length == 0 ||
+        (r.isLowerBoundEqualTo &&
+          Bytes.compareTo(r.lowerBound, 0, r.lowerBound.length,
+            value, valueOffSet, valueLength) <= 0) ||
+        (!r.isLowerBoundEqualTo &&
+          Bytes.compareTo(r.lowerBound, 0, r.lowerBound.length,
+            value, valueOffSet, valueLength) < 0)
+
+      result = result || (upperBoundPass && lowerBoundPass)
+    })
+    result
+  }
+
+  /**
+   * This will allow us to merge filter logic that is joined to the existing filter
+   * through an OR operator
+   *
+   * @param other Filter to merge
+   */
+  def mergeUnion(other:ColumnFilter): Unit = {
+    other.points.foreach( p => points += p)
+
+    other.ranges.foreach( otherR => {
+      var doesOverLap = false
+      ranges.foreach{ r =>
+        if (r.getOverLapScanRange(otherR) != null) {
+          r.mergeUnion(otherR)
+          doesOverLap = true
+        }}
+      if (!doesOverLap) ranges.+=(otherR)
+    })
+  }
+
+  /**
+   * This will allow us to merge filter logic that is joined to the existing filter
+   * through an AND operator
+   *
+   * @param other Filter to merge
+   */
+  def mergeIntersect(other:ColumnFilter): Unit = {
+    val survivingPoints = new mutable.MutableList[Array[Byte]]()
+    points.foreach( p => {
+      other.points.foreach( otherP => {
+        if (Bytes.equals(p, otherP)) {
+          survivingPoints.+=(p)
+        }
+      })
+    })
+    points = survivingPoints
+
+    val survivingRanges = new mutable.MutableList[ScanRange]()
+
+    other.ranges.foreach( otherR => {
+      ranges.foreach( r => {
+        if (r.getOverLapScanRange(otherR) != null) {
+          r.mergeIntersect(otherR)
+          survivingRanges += r
+        }
+      })
+    })
+    ranges = survivingRanges
+  }
+
+  override def toString:String = {
+    val strBuilder = new StringBuilder
+    strBuilder.append("(points:(")
+    var isFirst = true
+    points.foreach( p => {
+      if (isFirst) isFirst = false
+      else strBuilder.append(",")
+      strBuilder.append(Bytes.toString(p))
+    })
+    strBuilder.append("),ranges:")
+    isFirst = true
+    ranges.foreach( r => {
+      if (isFirst) isFirst = false
+      else strBuilder.append(",")
+      strBuilder.append(r)
+    })
+    strBuilder.append("))")
+    strBuilder.toString()
+  }
+}
+
+/**
+ * A collection of ColumnFilters indexed by column names.
+ *
+ * Also contains merge commands that will consolidate the filters
+ * per column name
+ */
+@InterfaceAudience.Private
+class ColumnFilterCollection {
+  val columnFilterMap = new mutable.HashMap[String, ColumnFilter]
+
+  def clear(): Unit = {
+    columnFilterMap.clear()
+  }
+
+  /**
+   * This will allow us to merge filter logic that is joined to the existing filter
+   * through an OR operator.  This will merge a single column's filter
+   *
+   * @param column The column to be merged
+   * @param other  The other ColumnFilter object to merge
+   */
+  def mergeUnion(column:String, other:ColumnFilter): Unit = {
+    val existingFilter = columnFilterMap.get(column)
+    if (existingFilter.isEmpty) {
+      columnFilterMap.+=((column, other))
+    } else {
+      existingFilter.get.mergeUnion(other)
+    }
+  }
+
+  /**
+   * This will allow us to merge all filters in the existing collection
+   * to the filters in the other collection.  All merges are done as a result
+   * of an OR operator
+   *
+   * @param other The other ColumnFilterCollection to be merged
+   */
+  def mergeUnion(other:ColumnFilterCollection): Unit = {
+    other.columnFilterMap.foreach( e => {
+      mergeUnion(e._1, e._2)
+    })
+  }
+
+  /**
+   * This will allow us to merge all filters in the existing collection
+   * to the filters in the other collection.  All merges are done as a result
+   * of an AND operator
+   *
+   * @param other The other ColumnFilterCollection to be merged
+   */
+  def mergeIntersect(other:ColumnFilterCollection): Unit = {
+    other.columnFilterMap.foreach( e => {
+      val existingColumnFilter = columnFilterMap.get(e._1)
+      if (existingColumnFilter.isEmpty) {
+        columnFilterMap += e
+      } else {
+        existingColumnFilter.get.mergeIntersect(e._2)
+      }
+    })
+  }
+
+  override def toString:String = {
+    val strBuilder = new StringBuilder
+    columnFilterMap.foreach( e => strBuilder.append(e))
+    strBuilder.toString()
+  }
+}
+
+/**
+ * Utility object that holds static helper functions and also records the last
+ * executed filter information, which can be used for unit testing.
+ */
+@InterfaceAudience.Private
+object DefaultSourceStaticUtils {
+
+  val rawInteger = new RawInteger
+  val rawLong = new RawLong
+  val rawFloat = new RawFloat
+  val rawDouble = new RawDouble
+  val rawString = RawString.ASCENDING
+
+  val byteRange = new ThreadLocal[PositionedByteRange] {
+    override def initialValue(): PositionedByteRange = {
+      val range = new SimplePositionedMutableByteRange()
+      range.setOffset(0)
+      range.setPosition(0)
+    }
+  }
+
+  def getFreshByteRange(bytes: Array[Byte]): PositionedByteRange = {
+    getFreshByteRange(bytes, 0, bytes.length)
+  }
+
+  def getFreshByteRange(bytes: Array[Byte], offset: Int = 0, length: Int):
+  PositionedByteRange = {
+    byteRange.get().set(bytes).setLength(length).setOffset(offset)
+  }
+
+  // This will contain the last 5 filters and required fields used in buildScan.
+  // These values can be used in unit testing to make sure we are converting
+  // the Spark SQL input correctly.
+  val lastFiveExecutionRules =
+    new ConcurrentLinkedQueue[ExecutionRuleForUnitTesting]()
+
+  /**
+   * This method is to populate the lastFiveExecutionRules for unit test purposes.
+   * This method is not thread safe.
+   *
+   * @param rowKeyFilter           The rowKey Filter logic used in the last query
+   * @param dynamicLogicExpression The dynamicLogicExpression used in the last query
+   */
+  def populateLatestExecutionRules(rowKeyFilter: RowKeyFilter,
+                                   dynamicLogicExpression: DynamicLogicExpression): Unit = {
+    lastFiveExecutionRules.add(new ExecutionRuleForUnitTesting(
+      rowKeyFilter, dynamicLogicExpression))
+    while (lastFiveExecutionRules.size() > 5) {
+      lastFiveExecutionRules.poll()
+    }
+  }
+
+  /**
+   * This method will convert the result content from HBase into the
+   * SQL value type that is requested by the Spark SQL schema definition
+   *
+   * @param field              The structure of the SparkSQL Column
+   * @param r                       The result object from HBase
+   * @return                        The converted object type
+   */
+  def getValue(field: Field,
+      r: Result): Any = {
+    if (field.isRowKey) {
+      val row = r.getRow
+
+      field.dt match {
+        case IntegerType => rawInteger.decode(getFreshByteRange(row))
+        case LongType => rawLong.decode(getFreshByteRange(row))
+        case FloatType => rawFloat.decode(getFreshByteRange(row))
+        case DoubleType => rawDouble.decode(getFreshByteRange(row))
+        case StringType => rawString.decode(getFreshByteRange(row))
+        case TimestampType => rawLong.decode(getFreshByteRange(row))
+        case _ => Bytes.toString(row)
+      }
+    } else {
+      val cellByteValue =
+        r.getColumnLatestCell(field.cfBytes, field.colBytes)
+      if (cellByteValue == null) null
+      else field.dt match {
+        case IntegerType => rawInteger.decode(getFreshByteRange(cellByteValue.getValueArray,
+          cellByteValue.getValueOffset, cellByteValue.getValueLength))
+        case LongType => rawLong.decode(getFreshByteRange(cellByteValue.getValueArray,
+          cellByteValue.getValueOffset, cellByteValue.getValueLength))
+        case FloatType => rawFloat.decode(getFreshByteRange(cellByteValue.getValueArray,
+          cellByteValue.getValueOffset, cellByteValue.getValueLength))
+        case DoubleType => rawDouble.decode(getFreshByteRange(cellByteValue.getValueArray,
+          cellByteValue.getValueOffset, cellByteValue.getValueLength))
+        case StringType => Bytes.toString(cellByteValue.getValueArray,
+          cellByteValue.getValueOffset, cellByteValue.getValueLength)
+        case TimestampType => rawLong.decode(getFreshByteRange(cellByteValue.getValueArray,
+          cellByteValue.getValueOffset, cellByteValue.getValueLength))
+        case _ => Bytes.toString(cellByteValue.getValueArray,
+          cellByteValue.getValueOffset, cellByteValue.getValueLength)
+      }
+    }
+  }
+
+  /**
+   * This will convert the value from SparkSQL to be stored into HBase using the
+   * right byte type
+   *
+   * @param field                   The Field that defines the target column's type
+   * @param value                   String value from SparkSQL
+   * @return                        Returns the byte array to go into HBase
+   */
+  def getByteValue(field: Field,
+      value: String): Array[Byte] = {
+    field.dt match {
+      case IntegerType =>
+        val result = new Array[Byte](Bytes.SIZEOF_INT)
+        val localDataRange = getFreshByteRange(result)
+        rawInteger.encode(localDataRange, value.toInt)
+        localDataRange.getBytes
+      case LongType =>
+        val result = new Array[Byte](Bytes.SIZEOF_LONG)
+        val localDataRange = getFreshByteRange(result)
+        rawLong.encode(localDataRange, value.toLong)
+        localDataRange.getBytes
+      case FloatType =>
+        val result = new Array[Byte](Bytes.SIZEOF_FLOAT)
+        val localDataRange = getFreshByteRange(result)
+        rawFloat.encode(localDataRange, value.toFloat)
+        localDataRange.getBytes
+      case DoubleType =>
+        val result = new Array[Byte](Bytes.SIZEOF_DOUBLE)
+        val localDataRange = getFreshByteRange(result)
+        rawDouble.encode(localDataRange, value.toDouble)
+        localDataRange.getBytes
+      case StringType =>
+        Bytes.toBytes(value)
+      case TimestampType =>
+        val result = new Array[Byte](Bytes.SIZEOF_LONG)
+        val localDataRange = getFreshByteRange(result)
+        rawLong.encode(localDataRange, value.toLong)
+        localDataRange.getBytes
+
+      case _ => Bytes.toBytes(value)
+    }
+  }
+}
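+
+// A minimal usage sketch (not part of this patch; the value 42 is arbitrary) showing
+// how the thread-local byte range and the Raw* codecs above round-trip a value:
+//
+//   import org.apache.hadoop.hbase.util.Bytes
+//
+//   val buf = new Array[Byte](Bytes.SIZEOF_INT)
+//   DefaultSourceStaticUtils.rawInteger.encode(
+//     DefaultSourceStaticUtils.getFreshByteRange(buf), 42)
+//   val decoded = DefaultSourceStaticUtils.rawInteger.decode(
+//     DefaultSourceStaticUtils.getFreshByteRange(buf))   // decoded == 42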
+
+/**
+ * Contains information related to filters for the row key.
+ * This can contain many ranges or points.
+ *
+ * @param currentPoint the initial point when the filter is created
+ * @param currentRange the initial scanRange when the filter is created
+ */
+@InterfaceAudience.Private
+class RowKeyFilter (currentPoint:Array[Byte] = null,
+                    currentRange:ScanRange =
+                    new ScanRange(null, true, new Array[Byte](0), true),
+                    var points:mutable.MutableList[Array[Byte]] =
+                    new mutable.MutableList[Array[Byte]](),
+                    var ranges:mutable.MutableList[ScanRange] =
+                    new mutable.MutableList[ScanRange]() ) extends Serializable {
+  //Collection of ranges
+  if (currentRange != null ) ranges.+=(currentRange)
+
+  //Collection of points
+  if (currentPoint != null) points.+=(currentPoint)
+
+  /**
+   * This will validate a given value through the filter's points and/or ranges;
+   * the result indicates whether the value passed the filter
+   *
+   * @param value       Value to be validated
+   * @param valueOffSet The offset of the value
+   * @param valueLength The length of the value
+   * @return            True if the value passes the filter, false if not
+   */
+  def validate(value:Array[Byte], valueOffSet:Int, valueLength:Int):Boolean = {
+    var result = false
+
+    points.foreach( p => {
+      if (Bytes.equals(p, 0, p.length, value, valueOffSet, valueLength)) {
+        result = true
+      }
+    })
+
+    ranges.foreach( r => {
+      val upperBoundPass = r.upperBound == null ||
+        (r.isUpperBoundEqualTo &&
+          Bytes.compareTo(r.upperBound, 0, r.upperBound.length,
+            value, valueOffSet, valueLength) >= 0) ||
+        (!r.isUpperBoundEqualTo &&
+          Bytes.compareTo(r.upperBound, 0, r.upperBound.length,
+            value, valueOffSet, valueLength) > 0)
+
+      val lowerBoundPass = r.lowerBound == null || r.lowerBound.length == 0 ||
+        (r.isLowerBoundEqualTo &&
+          Bytes.compareTo(r.lowerBound, 0, r.lowerBound.length,
+            value, valueOffSet, valueLength) <= 0) ||
+        (!r.isLowerBoundEqualTo &&
+          Bytes.compareTo(r.lowerBound, 0, r.lowerBound.length,
+            value, valueOffSet, valueLength) < 0)
+
+      result = result || (upperBoundPass && lowerBoundPass)
+    })
+    result
+  }
+
+  /**
+   * This will allow us to merge filter logic that is joined to the existing filter
+   * through an OR operator
+   *
+   * @param other Filter to merge
+   */
+  def mergeUnion(other:RowKeyFilter): RowKeyFilter = {
+    other.points.foreach( p => points += p)
+
+    other.ranges.foreach( otherR => {
+      var doesOverLap = false
+      ranges.foreach{ r =>
+        if (r.getOverLapScanRange(otherR) != null) {
+          r.mergeUnion(otherR)
+          doesOverLap = true
+        }}
+      if (!doesOverLap) ranges.+=(otherR)
+    })
+    this
+  }
+
+  /**
+   * This will allow us to merge filter logic that is joined to the existing filter
+   * through an AND operator
+   *
+   * @param other Filter to merge
+   */
+  def mergeIntersect(other:RowKeyFilter): RowKeyFilter = {
+    val survivingPoints = new mutable.MutableList[Array[Byte]]()
+    val didntSurviveFirstPassPoints = new mutable.MutableList[Array[Byte]]()
+    if (points == null || points.length == 0) {
+      other.points.foreach( otherP => {
+        didntSurviveFirstPassPoints += otherP
+      })
+    } else {
+      points.foreach(p => {
+        if (other.points.length == 0) {
+          didntSurviveFirstPassPoints += p
+        } else {
+          other.points.foreach(otherP => {
+            if (Bytes.equals(p, otherP)) {
+              survivingPoints += p
+            } else {
+              didntSurviveFirstPassPoints += p
+            }
+          })
+        }
+      })
+    }
+
+    val survivingRanges = new mutable.MutableList[ScanRange]()
+
+    if (ranges.length == 0) {
+      didntSurviveFirstPassPoints.foreach(p => {
+          survivingPoints += p
+      })
+    } else {
+      ranges.foreach(r => {
+        other.ranges.foreach(otherR => {
+          val overLapScanRange = r.getOverLapScanRange(otherR)
+          if (overLapScanRange != null) {
+            survivingRanges += overLapScanRange
+          }
+        })
+        didntSurviveFirstPassPoints.foreach(p => {
+          if (r.containsPoint(p)) {
+            survivingPoints += p
+          }
+        })
+      })
+    }
+    points = survivingPoints
+    ranges = survivingRanges
+    this
+  }
+
+  override def toString:String = {
+    val strBuilder = new StringBuilder
+    strBuilder.append("(points:(")
+    var isFirst = true
+    points.foreach( p => {
+      if (isFirst) isFirst = false
+      else strBuilder.append(",")
+      strBuilder.append(Bytes.toString(p))
+    })
+    strBuilder.append("),ranges:")
+    isFirst = true
+    ranges.foreach( r => {
+      if (isFirst) isFirst = false
+      else strBuilder.append(",")
+      strBuilder.append(r)
+    })
+    strBuilder.append("))")
+    strBuilder.toString()
+  }
+}
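+
+// A usage sketch (illustrative only; the row keys are made up): intersecting a point
+// filter with a bounded range keeps only points that fall inside the range, and
+// validate() then checks candidate row keys against what survived.
+//
+//   import org.apache.hadoop.hbase.util.Bytes
+//
+//   val range = new RowKeyFilter(null,
+//     new ScanRange(Bytes.toBytes("row-100"), true, Bytes.toBytes("row-001"), true))
+//   val point = new RowKeyFilter(Bytes.toBytes("row-005"), null)
+//   range.mergeIntersect(point)
+//   val key = Bytes.toBytes("row-005")
+//   range.validate(key, 0, key.length)   // true: the point lies within the range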
+
+@InterfaceAudience.Private
+class ExecutionRuleForUnitTesting(val rowKeyFilter: RowKeyFilter,
+                                  val dynamicLogicExpression: DynamicLogicExpression)
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpression.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpression.scala
new file mode 100644 (file)
index 0000000..4c35a7b
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.util
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.spark.datasources.{BytesEncoder, JavaBytesEncoder}
+import org.apache.hadoop.hbase.spark.datasources.JavaBytesEncoder.JavaBytesEncoder
+import org.apache.hadoop.hbase.util.Bytes
+
+/**
+ * Dynamic logic for SQL push down. There is an instance for most
+ * common operations and a pass-through for operations not covered here.
+ *
+ * Logic can be nested with And or Or operators.
+ *
+ * A logic tree can be written out as a string and reconstructed from that string
+ *
+ */
+@InterfaceAudience.Private
+trait DynamicLogicExpression {
+  def execute(columnToCurrentRowValueMap: util.HashMap[String, ByteArrayComparable],
+              valueFromQueryValueArray:Array[Array[Byte]]): Boolean
+  def toExpressionString: String = {
+    val strBuilder = new StringBuilder
+    appendToExpression(strBuilder)
+    strBuilder.toString()
+  }
+  def filterOps: JavaBytesEncoder = JavaBytesEncoder.Unknown
+
+  def appendToExpression(strBuilder:StringBuilder)
+
+  var encoder: BytesEncoder = _
+
+  def setEncoder(enc: BytesEncoder): DynamicLogicExpression = {
+    encoder = enc
+    this
+  }
+}
+
+@InterfaceAudience.Private
+trait CompareTrait {
+  self: DynamicLogicExpression =>
+  def columnName: String
+  def valueFromQueryIndex: Int
+  def execute(columnToCurrentRowValueMap:
+              util.HashMap[String, ByteArrayComparable],
+              valueFromQueryValueArray:Array[Array[Byte]]): Boolean = {
+    val currentRowValue = columnToCurrentRowValueMap.get(columnName)
+    val valueFromQuery = valueFromQueryValueArray(valueFromQueryIndex)
+    currentRowValue != null &&
+      encoder.filter(currentRowValue.bytes, currentRowValue.offset, currentRowValue.length,
+        valueFromQuery, 0, valueFromQuery.length, filterOps)
+  }
+}
+
+@InterfaceAudience.Private
+class AndLogicExpression (val leftExpression:DynamicLogicExpression,
+                           val rightExpression:DynamicLogicExpression)
+  extends DynamicLogicExpression{
+  override def execute(columnToCurrentRowValueMap:
+                       util.HashMap[String, ByteArrayComparable],
+                       valueFromQueryValueArray:Array[Array[Byte]]): Boolean = {
+    leftExpression.execute(columnToCurrentRowValueMap, valueFromQueryValueArray) &&
+      rightExpression.execute(columnToCurrentRowValueMap, valueFromQueryValueArray)
+  }
+
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    strBuilder.append("( ")
+    strBuilder.append(leftExpression.toExpressionString)
+    strBuilder.append(" AND ")
+    strBuilder.append(rightExpression.toExpressionString)
+    strBuilder.append(" )")
+  }
+}
+
+@InterfaceAudience.Private
+class OrLogicExpression (val leftExpression:DynamicLogicExpression,
+                          val rightExpression:DynamicLogicExpression)
+  extends DynamicLogicExpression{
+  override def execute(columnToCurrentRowValueMap:
+                       util.HashMap[String, ByteArrayComparable],
+                       valueFromQueryValueArray:Array[Array[Byte]]): Boolean = {
+    leftExpression.execute(columnToCurrentRowValueMap, valueFromQueryValueArray) ||
+      rightExpression.execute(columnToCurrentRowValueMap, valueFromQueryValueArray)
+  }
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    strBuilder.append("( ")
+    strBuilder.append(leftExpression.toExpressionString)
+    strBuilder.append(" OR ")
+    strBuilder.append(rightExpression.toExpressionString)
+    strBuilder.append(" )")
+  }
+}
+
+@InterfaceAudience.Private
+class EqualLogicExpression (val columnName:String,
+                            val valueFromQueryIndex:Int,
+                            val isNot:Boolean) extends DynamicLogicExpression{
+  override def execute(columnToCurrentRowValueMap:
+                       util.HashMap[String, ByteArrayComparable],
+                       valueFromQueryValueArray:Array[Array[Byte]]): Boolean = {
+    val currentRowValue = columnToCurrentRowValueMap.get(columnName)
+    val valueFromQuery = valueFromQueryValueArray(valueFromQueryIndex)
+
+    currentRowValue != null &&
+      Bytes.equals(valueFromQuery,
+        0, valueFromQuery.length, currentRowValue.bytes,
+        currentRowValue.offset, currentRowValue.length) != isNot
+  }
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    val command = if (isNot) "!=" else "=="
+    strBuilder.append(columnName + " " + command + " " + valueFromQueryIndex)
+  }
+}
+
+@InterfaceAudience.Private
+class IsNullLogicExpression (val columnName:String,
+                             val isNot:Boolean) extends DynamicLogicExpression{
+  override def execute(columnToCurrentRowValueMap:
+                       util.HashMap[String, ByteArrayComparable],
+                       valueFromQueryValueArray:Array[Array[Byte]]): Boolean = {
+    val currentRowValue = columnToCurrentRowValueMap.get(columnName)
+
+    (currentRowValue == null) != isNot
+  }
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    val command = if (isNot) "isNotNull" else "isNull"
+    strBuilder.append(columnName + " " + command)
+  }
+}
+
+@InterfaceAudience.Private
+class GreaterThanLogicExpression (override val columnName:String,
+                                  override val valueFromQueryIndex:Int)
+  extends DynamicLogicExpression with CompareTrait{
+  override val filterOps = JavaBytesEncoder.Greater
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    strBuilder.append(columnName + " > " + valueFromQueryIndex)
+  }
+}
+
+@InterfaceAudience.Private
+class GreaterThanOrEqualLogicExpression (override val columnName:String,
+                                         override val valueFromQueryIndex:Int)
+  extends DynamicLogicExpression with CompareTrait{
+  override val filterOps = JavaBytesEncoder.GreaterEqual
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    strBuilder.append(columnName + " >= " + valueFromQueryIndex)
+  }
+}
+
+@InterfaceAudience.Private
+class LessThanLogicExpression (override val columnName:String,
+                               override val valueFromQueryIndex:Int)
+  extends DynamicLogicExpression with CompareTrait {
+  override val filterOps = JavaBytesEncoder.Less
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    strBuilder.append(columnName + " < " + valueFromQueryIndex)
+  }
+}
+
+@InterfaceAudience.Private
+class LessThanOrEqualLogicExpression (val columnName:String,
+                                      val valueFromQueryIndex:Int)
+  extends DynamicLogicExpression with CompareTrait{
+  override val filterOps = JavaBytesEncoder.LessEqual
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    strBuilder.append(columnName + " <= " + valueFromQueryIndex)
+  }
+}
+
+@InterfaceAudience.Private
+class PassThroughLogicExpression() extends DynamicLogicExpression {
+  override def execute(columnToCurrentRowValueMap:
+                       util.HashMap[String, ByteArrayComparable],
+                       valueFromQueryValueArray: Array[Array[Byte]]): Boolean = true
+
+  override def appendToExpression(strBuilder: StringBuilder): Unit = {
+    // Fix the offset bug by adding a dummy token, to avoid crashing the region server:
+    // in DynamicLogicExpressionBuilder.build the command is always read from offSet + 1,
+    //   val command = expressionArray(offSet + 1)
+    // so we have to pad the expression so that `Pass` lands on the right offset.
+    strBuilder.append("dummy Pass -1")
+  }
+}
+
+@InterfaceAudience.Private
+object DynamicLogicExpressionBuilder {
+  def build(expressionString: String, encoder: BytesEncoder): DynamicLogicExpression = {
+
+    val expressionAndOffset = build(expressionString.split(' '), 0, encoder)
+    expressionAndOffset._1
+  }
+
+  private def build(expressionArray:Array[String],
+                    offSet:Int, encoder: BytesEncoder): (DynamicLogicExpression, Int) = {
+    val expr = {
+      if (expressionArray(offSet).equals("(")) {
+        val left = build(expressionArray, offSet + 1, encoder)
+        val right = build(expressionArray, left._2 + 1, encoder)
+        if (expressionArray(left._2).equals("AND")) {
+          (new AndLogicExpression(left._1, right._1), right._2 + 1)
+        } else if (expressionArray(left._2).equals("OR")) {
+          (new OrLogicExpression(left._1, right._1), right._2 + 1)
+        } else {
+          throw new Throwable("Unknown gate:" + expressionArray(left._2))
+        }
+      } else {
+        val command = expressionArray(offSet + 1)
+        if (command.equals("<")) {
+          (new LessThanLogicExpression(expressionArray(offSet),
+            expressionArray(offSet + 2).toInt), offSet + 3)
+        } else if (command.equals("<=")) {
+          (new LessThanOrEqualLogicExpression(expressionArray(offSet),
+            expressionArray(offSet + 2).toInt), offSet + 3)
+        } else if (command.equals(">")) {
+          (new GreaterThanLogicExpression(expressionArray(offSet),
+            expressionArray(offSet + 2).toInt), offSet + 3)
+        } else if (command.equals(">=")) {
+          (new GreaterThanOrEqualLogicExpression(expressionArray(offSet),
+            expressionArray(offSet + 2).toInt), offSet + 3)
+        } else if (command.equals("==")) {
+          (new EqualLogicExpression(expressionArray(offSet),
+            expressionArray(offSet + 2).toInt, false), offSet + 3)
+        } else if (command.equals("!=")) {
+          (new EqualLogicExpression(expressionArray(offSet),
+            expressionArray(offSet + 2).toInt, true), offSet + 3)
+        } else if (command.equals("isNull")) {
+          (new IsNullLogicExpression(expressionArray(offSet), false), offSet + 2)
+        } else if (command.equals("isNotNull")) {
+          (new IsNullLogicExpression(expressionArray(offSet), true), offSet + 2)
+        } else if (command.equals("Pass")) {
+          (new PassThroughLogicExpression, offSet + 3)
+        } else {
+          throw new Throwable("Unknown logic command:" + command)
+        }
+      }
+    }
+    expr._1.setEncoder(encoder)
+    expr
+  }
+}
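+
+// A usage sketch (illustrative; it assumes the module's NaiveEncoder implementation of
+// BytesEncoder and a single-argument ByteArrayComparable constructor, and "Col0" plus
+// the query values are made up): build an expression tree from its string form and
+// evaluate it against one row value.
+//
+//   import java.util
+//   import org.apache.hadoop.hbase.spark.datasources.NaiveEncoder
+//   import org.apache.hadoop.hbase.util.Bytes
+//
+//   val expr = DynamicLogicExpressionBuilder.build("( Col0 < 0 OR Col0 > 1 )", new NaiveEncoder)
+//   val currentRow = new util.HashMap[String, ByteArrayComparable]()
+//   currentRow.put("Col0", new ByteArrayComparable(Bytes.toBytes(10)))
+//   val queryValues = Array(Bytes.toBytes(5), Bytes.toBytes(15))
+//   expr.execute(currentRow, queryValues)   // evaluates the tree against the row value
+//   expr.toExpressionString                 // "( Col0 < 0 OR Col0 > 1 )"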
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/FamiliesQualifiersValues.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/FamiliesQualifiersValues.scala
new file mode 100644 (file)
index 0000000..7a651e1
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark
+
+import java.util
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This object is a clean way to store and sort all cells that will be bulk
+ * loaded into a single row
+ */
+@InterfaceAudience.Public
+class FamiliesQualifiersValues extends Serializable {
+  //Tree maps are used because we need the results to
+  // be sorted when we read them
+  val familyMap = new util.TreeMap[ByteArrayWrapper,
+    util.TreeMap[ByteArrayWrapper, Array[Byte]]]()
+
+  // Normally a row has more columns than column families,
+  // so this wrapper is reused for column family look-ups
+  val reusableWrapper = new ByteArrayWrapper(null)
+
+  /**
+   * Adds a new cell to an existing row
+   * @param family    HBase column family
+   * @param qualifier HBase column qualifier
+   * @param value     HBase cell value
+   */
+  def += (family: Array[Byte], qualifier: Array[Byte], value: Array[Byte]): Unit = {
+
+    reusableWrapper.value = family
+
+    var qualifierValues = familyMap.get(reusableWrapper)
+
+    if (qualifierValues == null) {
+      qualifierValues = new util.TreeMap[ByteArrayWrapper, Array[Byte]]()
+      familyMap.put(new ByteArrayWrapper(family), qualifierValues)
+    }
+
+    qualifierValues.put(new ByteArrayWrapper(qualifier), value)
+  }
+
+  /**
+    * A wrapper for "+=" method above, can be used by Java
+    * @param family    HBase column family
+    * @param qualifier HBase column qualifier
+    * @param value     HBase cell value
+    */
+  def add(family: Array[Byte], qualifier: Array[Byte], value: Array[Byte]): Unit = {
+    this += (family, qualifier, value)
+  }
+}
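+
+// A usage sketch (the column family, qualifiers and values are illustrative): cells
+// added through "+=" (or add() from Java) are grouped per family and kept sorted,
+// which is what the bulk load code expects when it writes a single row.
+//
+//   import org.apache.hadoop.hbase.util.Bytes
+//
+//   val row = new FamiliesQualifiersValues
+//   row += (Bytes.toBytes("cf"), Bytes.toBytes("q2"), Bytes.toBytes("v2"))
+//   row += (Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"))
+//   row.familyMap.firstKey()   // the "cf" wrapper; its qualifiers iterate as q1, q2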
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/FamilyHFileWriteOptions.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/FamilyHFileWriteOptions.scala
new file mode 100644 (file)
index 0000000..9ee9291
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.io.Serializable
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This object will hold optional data for how a given column family's
+ * writer will work
+ *
+ * @param compression       String to define the Compression to be used in the HFile
+ * @param bloomType         String to define the bloom type to be used in the HFile
+ * @param blockSize         The block size to be used in the HFile
+ * @param dataBlockEncoding String to define the data block encoding to be used
+ *                          in the HFile
+ */
+@InterfaceAudience.Public
+class FamilyHFileWriteOptions( val compression:String,
+                               val bloomType: String,
+                               val blockSize: Int,
+                               val dataBlockEncoding: String) extends Serializable
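+
+// A usage sketch: per-family HFile settings handed to the bulk load API. The strings
+// below are standard HBase names (compression "GZ", bloom "ROW", data block encoding
+// "FAST_DIFF"); the 64 KB block size is just an illustrative choice.
+//
+//   val familyOptions = new FamilyHFileWriteOptions("GZ", "ROW", 65536, "FAST_DIFF")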
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
new file mode 100644 (file)
index 0000000..1fc92c0
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.io.IOException
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.client.Admin
+import org.apache.hadoop.hbase.client.Connection
+import org.apache.hadoop.hbase.client.ConnectionFactory
+import org.apache.hadoop.hbase.client.RegionLocator
+import org.apache.hadoop.hbase.client.Table
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory
+import org.apache.hadoop.hbase.security.User
+import org.apache.hadoop.hbase.security.UserProvider
+import org.apache.hadoop.hbase.spark.datasources.HBaseSparkConf
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.hbase.TableName
+import org.apache.yetus.audience.InterfaceAudience
+import scala.collection.mutable
+
+@InterfaceAudience.Private
+private[spark] object HBaseConnectionCache extends Logging {
+
+  // A hashmap of Spark-HBase connections. Key is HBaseConnectionKey.
+  val connectionMap = new mutable.HashMap[HBaseConnectionKey, SmartConnection]()
+
+  val cacheStat = HBaseConnectionCacheStat(0, 0, 0)
+
+  // in milliseconds
+  private final val DEFAULT_TIME_OUT: Long = HBaseSparkConf.DEFAULT_CONNECTION_CLOSE_DELAY
+  private var timeout = DEFAULT_TIME_OUT
+  private var closed: Boolean = false
+
+  var housekeepingThread = new Thread(new Runnable {
+    override def run() {
+      while (true) {
+        try {
+          Thread.sleep(timeout)
+        } catch {
+          case e: InterruptedException =>
+            // setTimeout() and close() may interrupt the sleep and it's safe
+            // to ignore the exception
+        }
+        if (closed)
+          return
+        performHousekeeping(false)
+      }
+    }
+  })
+  housekeepingThread.setDaemon(true)
+  housekeepingThread.start()
+
+  def getStat: HBaseConnectionCacheStat = {
+    connectionMap.synchronized {
+      cacheStat.numActiveConnections = connectionMap.size
+      cacheStat.copy()
+    }
+  }
+
+  def close(): Unit = {
+    try {
+      connectionMap.synchronized {
+        if (closed)
+          return
+        closed = true
+        housekeepingThread.interrupt()
+        housekeepingThread = null
+        HBaseConnectionCache.performHousekeeping(true)
+      }
+    } catch {
+      case e: Exception => logWarning("Error in finalHouseKeeping", e)
+    }
+  }
+
+  def performHousekeeping(forceClean: Boolean) = {
+    val tsNow: Long = System.currentTimeMillis()
+    connectionMap.synchronized {
+      connectionMap.foreach {
+        x => {
+          if(x._2.refCount < 0) {
+            logError(s"Bug to be fixed: negative refCount of connection ${x._2}")
+          }
+
+          if(forceClean || ((x._2.refCount <= 0) && (tsNow - x._2.timestamp > timeout))) {
+            try{
+              x._2.connection.close()
+            } catch {
+              case e: IOException => logWarning(s"Fail to close connection ${x._2}", e)
+            }
+            connectionMap.remove(x._1)
+          }
+        }
+      }
+    }
+  }
+
+  // For testing purpose only
+  def getConnection(key: HBaseConnectionKey, conn: => Connection): SmartConnection = {
+    connectionMap.synchronized {
+      if (closed)
+        return null
+      cacheStat.numTotalRequests += 1
+      val sc = connectionMap.getOrElseUpdate(key, {cacheStat.numActualConnectionsCreated += 1
+        new SmartConnection(conn)})
+      sc.refCount += 1
+      sc
+    }
+  }
+
+  def getConnection(conf: Configuration): SmartConnection =
+    getConnection(new HBaseConnectionKey(conf), ConnectionFactory.createConnection(conf))
+
+  // For testing purpose only
+  def setTimeout(to: Long): Unit  = {
+    connectionMap.synchronized {
+      if (closed)
+        return
+      timeout = to
+      housekeepingThread.interrupt()
+    }
+  }
+}
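+
+// A usage sketch (the table name "my_table" is hypothetical): callers ask the cache for
+// a SmartConnection and close() it when done; close() only drops the reference count,
+// and the housekeeping thread closes idle connections after the timeout.
+//
+//   import org.apache.hadoop.hbase.HBaseConfiguration
+//   import org.apache.hadoop.hbase.TableName
+//
+//   val smart = HBaseConnectionCache.getConnection(HBaseConfiguration.create())
+//   val table = smart.getTable(TableName.valueOf("my_table"))
+//   try {
+//     // use the table ...
+//   } finally {
+//     table.close()
+//     smart.close()   // decrement refCount; the actual Connection stays cached
+//   }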
+
+@InterfaceAudience.Private
+private[hbase] case class SmartConnection (
+    connection: Connection, var refCount: Int = 0, var timestamp: Long = 0) {
+  def getTable(tableName: TableName): Table = connection.getTable(tableName)
+  def getRegionLocator(tableName: TableName): RegionLocator = connection.getRegionLocator(tableName)
+  def isClosed: Boolean = connection.isClosed
+  def getAdmin: Admin = connection.getAdmin
+  def close() = {
+    HBaseConnectionCache.connectionMap.synchronized {
+      refCount -= 1
+      if(refCount <= 0)
+        timestamp = System.currentTimeMillis()
+    }
+  }
+}
+
+/**
+ * Denotes a unique key to an HBase Connection instance.
+ * Please refer to 'org.apache.hadoop.hbase.client.HConnectionKey'.
+ *
+ * In essence, this class captures the properties in Configuration
+ * that may be used in the process of establishing a connection.
+ *
+ */
+@InterfaceAudience.Private
+class HBaseConnectionKey(c: Configuration) extends Logging {
+  val conf: Configuration = c
+  val CONNECTION_PROPERTIES: Array[String] = Array[String](
+    HConstants.ZOOKEEPER_QUORUM,
+    HConstants.ZOOKEEPER_ZNODE_PARENT,
+    HConstants.ZOOKEEPER_CLIENT_PORT,
+    HConstants.HBASE_CLIENT_PAUSE,
+    HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+    HConstants.HBASE_RPC_TIMEOUT_KEY,
+    HConstants.HBASE_META_SCANNER_CACHING,
+    HConstants.HBASE_CLIENT_INSTANCE_ID,
+    HConstants.RPC_CODEC_CONF_KEY,
+    HConstants.USE_META_REPLICAS,
+    RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY)
+
+  var username: String = _
+  var m_properties = mutable.HashMap.empty[String, String]
+  if (conf != null) {
+    for (property <- CONNECTION_PROPERTIES) {
+      val value: String = conf.get(property)
+      if (value != null) {
+        m_properties.+=((property, value))
+      }
+    }
+    try {
+      val provider: UserProvider = UserProvider.instantiate(conf)
+      val currentUser: User = provider.getCurrent
+      if (currentUser != null) {
+        username = currentUser.getName
+      }
+    }
+    catch {
+      case e: IOException => {
+        logWarning("Error obtaining current user, skipping username in HBaseConnectionKey", e)
+      }
+    }
+  }
+
+  // make 'properties' immutable
+  val properties = m_properties.toMap
+
+  override def hashCode: Int = {
+    val prime: Int = 31
+    var result: Int = 1
+    if (username != null) {
+      result = username.hashCode
+    }
+    for (property <- CONNECTION_PROPERTIES) {
+      val value: Option[String] = properties.get(property)
+      if (value.isDefined) {
+        result = prime * result + value.hashCode
+      }
+    }
+    result
+  }
+
+  override def equals(obj: Any): Boolean = {
+    if (obj == null) return false
+    if (getClass ne obj.getClass) return false
+    val that: HBaseConnectionKey = obj.asInstanceOf[HBaseConnectionKey]
+    if (this.username != null && !(this.username == that.username)) {
+      return false
+    }
+    else if (this.username == null && that.username != null) {
+      return false
+    }
+    if (this.properties == null) {
+      if (that.properties != null) {
+        return false
+      }
+    }
+    else {
+      if (that.properties == null) {
+        return false
+      }
+      var flag: Boolean = true
+      for (property <- CONNECTION_PROPERTIES) {
+        val thisValue: Option[String] = this.properties.get(property)
+        val thatValue: Option[String] = that.properties.get(property)
+        flag = true
+        if (thisValue eq thatValue) {
+          flag = false //continue, so make flag be false
+        }
+        if (flag && (thisValue == null || !(thisValue == thatValue))) {
+          return false
+        }
+      }
+    }
+    true
+  }
+
+  override def toString: String = {
+    "HBaseConnectionKey{" + "properties=" + properties + ", username='" + username + '\'' + '}'
+  }
+}
+
+/**
+ * To log the state of 'HBaseConnectionCache'
+ *
+ * @param numTotalRequests number of total connection requests to the cache
+ * @param numActualConnectionsCreated number of actual HBase connections the cache ever created
+ * @param numActiveConnections number of current alive HBase connections the cache is holding
+ */
+@InterfaceAudience.Private
+case class HBaseConnectionCacheStat(var numTotalRequests: Long,
+                                    var numActualConnectionsCreated: Long,
+                                    var numActiveConnections: Long)
+
+
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
new file mode 100644 (file)
index 0000000..e50a3e8
--- /dev/null
@@ -0,0 +1,1126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.net.InetSocketAddress
+import java.util
+import java.util.UUID
+import javax.management.openmbean.KeyAlreadyExistsException
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.HFileSystem
+import org.apache.hadoop.hbase._
+import org.apache.hadoop.hbase.io.compress.Compression
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
+import org.apache.hadoop.hbase.io.hfile.{HFile, CacheConfig, HFileContextBuilder, HFileWriterImpl}
+import org.apache.hadoop.hbase.regionserver.{HStore, HStoreFile, StoreFileWriter, BloomType}
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.mapred.JobConf
+import org.apache.spark.broadcast.Broadcast
+import org.apache.spark.deploy.SparkHadoopUtil
+import org.apache.spark.rdd.RDD
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
+import org.apache.hadoop.hbase.client._
+import scala.reflect.ClassTag
+import org.apache.spark.{SerializableWritable, SparkContext}
+import org.apache.hadoop.hbase.mapreduce.{TableMapReduceUtil,
+TableInputFormat, IdentityTableMapper}
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.hadoop.mapreduce.Job
+import org.apache.spark.streaming.dstream.DStream
+import java.io._
+import org.apache.hadoop.security.UserGroupInformation
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod
+import org.apache.hadoop.fs.{Path, FileAlreadyExistsException, FileSystem}
+import scala.collection.mutable
+
+/**
+  * HBaseContext is a façade for HBase operations
+  * like bulk put, get, increment, delete, and scan
+  *
+  * HBaseContext will take the responsibilities
+  * of disseminating the configuration information
+  * to the worker nodes and managing the life cycle of Connections.
+ */
+@InterfaceAudience.Public
+class HBaseContext(@transient val sc: SparkContext,
+                   @transient val config: Configuration,
+                   val tmpHdfsConfgFile: String = null)
+  extends Serializable with Logging {
+
+  @transient var credentials = UserGroupInformation.getCurrentUser().getCredentials()
+  @transient var tmpHdfsConfiguration:Configuration = config
+  @transient var appliedCredentials = false
+  @transient val job = Job.getInstance(config)
+  TableMapReduceUtil.initCredentials(job)
+  val broadcastedConf = sc.broadcast(new SerializableWritable(config))
+  val credentialsConf = sc.broadcast(new SerializableWritable(job.getCredentials))
+
+  LatestHBaseContextCache.latest = this
+
+  if (tmpHdfsConfgFile != null && config != null) {
+    val fs = FileSystem.newInstance(config)
+    val tmpPath = new Path(tmpHdfsConfgFile)
+    if (!fs.exists(tmpPath)) {
+      val outputStream = fs.create(tmpPath)
+      config.write(outputStream)
+      outputStream.close()
+    } else {
+      logWarning("tmpHdfsConfigDir " + tmpHdfsConfgFile + " exist!!")
+    }
+  }
+
+  /**
+   * A simple enrichment of the traditional Spark RDD foreachPartition.
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * @param rdd  Original RDD with data to iterate over
+   * @param f    Function to be given an iterator to iterate through
+   *             the RDD values and a Connection object to interact
+   *             with HBase
+   */
+  def foreachPartition[T](rdd: RDD[T],
+                          f: (Iterator[T], Connection) => Unit):Unit = {
+    rdd.foreachPartition(
+      it => hbaseForeachPartition(broadcastedConf, it, f))
+  }
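+
+  // A usage sketch (`hbaseContext`, `rdd`, the table name "t" and the column layout are
+  // illustrative): writing a partition through a BufferedMutator obtained from the
+  // provided Connection.
+  //
+  //   hbaseContext.foreachPartition(rdd,
+  //     (it: Iterator[(String, String)], conn: Connection) => {
+  //       val mutator = conn.getBufferedMutator(TableName.valueOf("t"))
+  //       it.foreach { case (rowKey, value) =>
+  //         mutator.mutate(new Put(Bytes.toBytes(rowKey))
+  //           .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(value)))
+  //       }
+  //       mutator.flush()
+  //       mutator.close()
+  //     })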
+
+  /**
+   * A simple enrichment of the traditional Spark Streaming dStream foreach
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * @param dstream  Original DStream with data to iterate over
+   * @param f        Function to be given an iterator to iterate through
+   *                 the DStream values and a Connection object to
+   *                 interact with HBase
+   */
+  def foreachPartition[T](dstream: DStream[T],
+                    f: (Iterator[T], Connection) => Unit):Unit = {
+    dstream.foreachRDD((rdd, time) => {
+      foreachPartition(rdd, f)
+    })
+  }
+
+  /**
+   * A simple enrichment of the traditional Spark RDD mapPartition.
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * @param rdd  Original RDD with data to iterate over
+   * @param mp   Function to be given an iterator to iterate through
+   *             the RDD values and a Connection object to interact
+   *             with HBase
+   * @return     Returns a new RDD generated by the user-defined
+   *             function, just like a normal mapPartitions
+   */
+  def mapPartitions[T, R: ClassTag](rdd: RDD[T],
+                                   mp: (Iterator[T], Connection) => Iterator[R]): RDD[R] = {
+
+    rdd.mapPartitions[R](it => hbaseMapPartition[T, R](broadcastedConf,
+      it,
+      mp))
+
+  }
+
+  /**
+   * A simple enrichment of the traditional Spark Streaming DStream
+   * foreachPartition.
+   *
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * Note: Make sure to partition correctly to avoid memory issues when
+   *       getting data from HBase
+   *
+   * @param dstream  Original DStream with data to iterate over
+   * @param f        Function to be given an iterator to iterate through
+   *                 the DStream values and a Connection object to
+   *                 interact with HBase
+   */
+  def streamForeachPartition[T](dstream: DStream[T],
+                                f: (Iterator[T], Connection) => Unit): Unit = {
+
+    dstream.foreachRDD(rdd => this.foreachPartition(rdd, f))
+  }
+
+  /**
+   * A simple enrichment of the traditional Spark Streaming DStream
+   * mapPartition.
+   *
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * Note: Make sure to partition correctly to avoid memory issues when
+   *       getting data from HBase
+   *
+   * @param dstream  Original DStream with data to iterate over
+   * @param f        Function to be given an iterator to iterate through
+   *                 the DStream values and a Connection object to
+   *                 interact with HBase
+   * @return         Returns a new DStream generated by the user-defined
+   *                 function, just like a normal mapPartitions
+   */
+  def streamMapPartitions[T, U: ClassTag](dstream: DStream[T],
+                                f: (Iterator[T], Connection) => Iterator[U]):
+  DStream[U] = {
+    dstream.mapPartitions(it => hbaseMapPartition[T, U](
+      broadcastedConf,
+      it,
+      f))
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.foreachPartition method.
+   *
+   * It allows a user to take an RDD,
+   * generate Puts, and send them to HBase.
+   * The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param rdd       Original RDD with data to iterate over
+   * @param tableName The name of the table to put into
+   * @param f         Function to convert a value in the RDD to a HBase Put
+   */
+  def bulkPut[T](rdd: RDD[T], tableName: TableName, f: (T) => Put) {
+
+    val tName = tableName.getName
+    rdd.foreachPartition(
+      it => hbaseForeachPartition[T](
+        broadcastedConf,
+        it,
+        (iterator, connection) => {
+          val m = connection.getBufferedMutator(TableName.valueOf(tName))
+          iterator.foreach(T => m.mutate(f(T)))
+          m.flush()
+          m.close()
+        }))
+  }
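+
+  // A usage sketch (the SparkContext `sc`, table "t" and column layout are illustrative):
+  // each RDD element is converted to a Put by the supplied function.
+  //
+  //   val hbaseContext = new HBaseContext(sc, config)
+  //   val rdd = sc.parallelize(Seq(("row1", "v1"), ("row2", "v2")))
+  //   hbaseContext.bulkPut[(String, String)](rdd, TableName.valueOf("t"), t => {
+  //     new Put(Bytes.toBytes(t._1))
+  //       .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(t._2))
+  //   })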
+
+  def applyCreds[T] (){
+    credentials = UserGroupInformation.getCurrentUser().getCredentials()
+
+    if (log.isDebugEnabled) {
+      logDebug("appliedCredentials:" + appliedCredentials + ",credentials:" + credentials)
+    }
+
+    if (!appliedCredentials && credentials != null) {
+      appliedCredentials = true
+
+      @transient val ugi = UserGroupInformation.getCurrentUser
+      ugi.addCredentials(credentials)
+      // specify that this is a proxy user
+      ugi.setAuthenticationMethod(AuthenticationMethod.PROXY)
+
+      ugi.addCredentials(credentialsConf.value.value)
+    }
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.streamMapPartition method.
+   *
+   * It allows a user to take a DStream,
+   * generate Puts, and send them to HBase.
+   *
+   * The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param dstream    Original DStream with data to iterate over
+   * @param tableName  The name of the table to put into
+   * @param f          Function to convert a value in
+   *                   the DStream to a HBase Put
+   */
+  def streamBulkPut[T](dstream: DStream[T],
+                       tableName: TableName,
+                       f: (T) => Put) = {
+    val tName = tableName.getName
+    dstream.foreachRDD((rdd, time) => {
+      bulkPut(rdd, TableName.valueOf(tName), f)
+    })
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.foreachPartition method.
+   *
+   * It allows a user to take an RDD, generate Deletes,
+   * and send them to HBase.  The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param rdd       Original RDD with data to iterate over
+   * @param tableName The name of the table to delete from
+   * @param f         Function to convert a value in the RDD to a
+   *                  HBase Delete
+   * @param batchSize The number of deletes to batch before sending to HBase
+   */
+  def bulkDelete[T](rdd: RDD[T], tableName: TableName,
+                    f: (T) => Delete, batchSize: Integer) {
+    bulkMutation(rdd, tableName, f, batchSize)
+  }
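+
+  // A usage sketch (`rddOfKeys` is an assumed RDD[Array[Byte]] of row keys; the table
+  // name and batch size are illustrative): every key becomes a Delete, sent in batches of 4.
+  //
+  //   hbaseContext.bulkDelete[Array[Byte]](rddOfKeys, TableName.valueOf("t"),
+  //     key => new Delete(key), 4)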
+
+  /**
+   * A simple abstraction over the HBaseContext.streamBulkMutation method.
+   *
+   * It allows a user to take a DStream,
+   * generate Deletes, and send them to HBase.
+   *
+   * The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param dstream    Original DStream with data to iterate over
+   * @param tableName  The name of the table to delete from
+   * @param f          function to convert a value in the DStream to a
+   *                   HBase Delete
+   * @param batchSize        The number of deletes to batch before sending to HBase
+   */
+  def streamBulkDelete[T](dstream: DStream[T],
+                          tableName: TableName,
+                          f: (T) => Delete,
+                          batchSize: Integer) = {
+    streamBulkMutation(dstream, tableName, f, batchSize)
+  }
+
+  /**
+   *  Underlying function to support all bulk mutations
+   *
+   *  May be opened up if requested
+   */
+  private def bulkMutation[T](rdd: RDD[T], tableName: TableName,
+                              f: (T) => Mutation, batchSize: Integer) {
+
+    val tName = tableName.getName
+    rdd.foreachPartition(
+      it => hbaseForeachPartition[T](
+        broadcastedConf,
+        it,
+        (iterator, connection) => {
+          val table = connection.getTable(TableName.valueOf(tName))
+          val mutationList = new java.util.ArrayList[Mutation]
+          iterator.foreach(T => {
+            mutationList.add(f(T))
+            if (mutationList.size >= batchSize) {
+              table.batch(mutationList, null)
+              mutationList.clear()
+            }
+          })
+          if (mutationList.size() > 0) {
+            table.batch(mutationList, null)
+            mutationList.clear()
+          }
+          table.close()
+        }))
+  }
+
+  /**
+   *  Underlying function to support all bulk streaming mutations
+   *
+   *  May be opened up if requested
+   */
+  private def streamBulkMutation[T](dstream: DStream[T],
+                                    tableName: TableName,
+                                    f: (T) => Mutation,
+                                    batchSize: Integer) = {
+    val tName = tableName.getName
+    dstream.foreachRDD((rdd, time) => {
+      bulkMutation(rdd, TableName.valueOf(tName), f, batchSize)
+    })
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.mapPartition method.
+   *
+   * It allows a user to take an RDD and generate a
+   * new RDD based on Gets and the results they bring back from HBase
+   *
+   * @param tableName     The name of the table to get from
+   * @param batchSize     The number of Gets to be sent in a single batch
+   * @param rdd           Original RDD with data to iterate over
+   * @param makeGet       Function to convert a value in the RDD to a
+   *                      HBase Get
+   * @param convertResult This will convert the HBase Result object to
+   *                      whatever the user wants to put in the resulting
+   *                      RDD
+   * @return              New RDD that is created by the Get to HBase
+   */
+  def bulkGet[T, U: ClassTag](tableName: TableName,
+                    batchSize: Integer,
+                    rdd: RDD[T],
+                    makeGet: (T) => Get,
+                    convertResult: (Result) => U): RDD[U] = {
+
+    val getMapPartition = new GetMapPartition(tableName,
+      batchSize,
+      makeGet,
+      convertResult)
+
+    rdd.mapPartitions[U](it =>
+      hbaseMapPartition[T, U](
+        broadcastedConf,
+        it,
+        getMapPartition.run))
+  }
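+
+  // A usage sketch (`rddOfKeys` is an assumed RDD[Array[Byte]]; the table name and the
+  // conversion are illustrative): each key becomes a Get, and each Result is converted
+  // back to a String row key.
+  //
+  //   val values: RDD[String] = hbaseContext.bulkGet[Array[Byte], String](
+  //     TableName.valueOf("t"), 2, rddOfKeys,
+  //     key => new Get(key),
+  //     (r: Result) => Bytes.toString(r.getRow))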
+
+  /**
+   * A simple abstraction over the HBaseContext.streamMap method.
+   *
+   * It allows a user to take a DStream and
+   * generate a new DStream based on Gets and the results
+   * they bring back from HBase
+   *
+   * @param tableName     The name of the table to get from
+   * @param batchSize     The number of Gets to be sent in a single batch
+   * @param dStream       Original DStream with data to iterate over
+   * @param makeGet       Function to convert a value in the DStream to a
+   *                      HBase Get
+   * @param convertResult This will convert the HBase Result object to
+   *                      whatever the user wants to put in the resulting
+   *                      DStream
+   * @return              A new DStream that is created by the Get to HBase
+   */
+  def streamBulkGet[T, U: ClassTag](tableName: TableName,
+                                    batchSize: Integer,
+                                    dStream: DStream[T],
+                                    makeGet: (T) => Get,
+                                    convertResult: (Result) => U): DStream[U] = {
+
+    val getMapPartition = new GetMapPartition(tableName,
+      batchSize,
+      makeGet,
+      convertResult)
+
+    dStream.mapPartitions[U](it => hbaseMapPartition[T, U](
+      broadcastedConf,
+      it,
+      getMapPartition.run))
+  }
+
+  /**
+   * This function will use the native HBase TableInputFormat with the
+   * given scan object to generate a new RDD
+   *
+   *  @param tableName the name of the table to scan
+   *  @param scan      the HBase scan object to use to read data from HBase
+   *  @param f         function to convert a Result object from HBase into
+   *                   what the user wants in the final generated RDD
+   *  @return          new RDD with results from scan
+   */
+  def hbaseRDD[U: ClassTag](tableName: TableName, scan: Scan,
+                            f: ((ImmutableBytesWritable, Result)) => U): RDD[U] = {
+
+    val job: Job = Job.getInstance(getConf(broadcastedConf))
+
+    TableMapReduceUtil.initCredentials(job)
+    TableMapReduceUtil.initTableMapperJob(tableName, scan,
+      classOf[IdentityTableMapper], null, null, job)
+
+    val jconf = new JobConf(job.getConfiguration)
+    SparkHadoopUtil.get.addCredentials(jconf)
+    new NewHBaseRDD(sc,
+      classOf[TableInputFormat],
+      classOf[ImmutableBytesWritable],
+      classOf[Result],
+      job.getConfiguration,
+      this).map(f)
+  }
+
+  /**
+   * An overloaded version of HBaseContext hbaseRDD that defines the
+   * type of the resulting RDD
+   *
+   *  @param tableName the name of the table to scan
+   *  @param scans     the HBase scan object to use to read data from HBase
+   *  @return          New RDD with results from scan
+   *
+   */
+  def hbaseRDD(tableName: TableName, scans: Scan):
+  RDD[(ImmutableBytesWritable, Result)] = {
+
+    hbaseRDD[(ImmutableBytesWritable, Result)](
+      tableName,
+      scans,
+      (r: (ImmutableBytesWritable, Result)) => r)
+  }
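+
+  // A usage sketch (the table name and caching value are illustrative): scan a table into
+  // an RDD of (ImmutableBytesWritable, Result) pairs and extract the row keys.
+  //
+  //   val scan = new Scan()
+  //   scan.setCaching(100)
+  //   val scanRdd = hbaseContext.hbaseRDD(TableName.valueOf("t"), scan)
+  //   val rowKeys = scanRdd.map(pair => Bytes.toString(pair._2.getRow))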
+
+  /**
+   *  Underlying wrapper for all foreach functions in HBaseContext
+   */
+  private def hbaseForeachPartition[T](configBroadcast:
+                                       Broadcast[SerializableWritable[Configuration]],
+                                        it: Iterator[T],
+                                        f: (Iterator[T], Connection) => Unit) = {
+
+    val config = getConf(configBroadcast)
+
+    applyCreds
+    // specify that this is a proxy user
+    val smartConn = HBaseConnectionCache.getConnection(config)
+    f(it, smartConn.connection)
+    smartConn.close()
+  }
+
+  private def getConf(configBroadcast: Broadcast[SerializableWritable[Configuration]]):
+  Configuration = {
+
+    if (tmpHdfsConfiguration == null && tmpHdfsConfgFile != null) {
+      val fs = FileSystem.newInstance(SparkHadoopUtil.get.conf)
+      val inputStream = fs.open(new Path(tmpHdfsConfgFile))
+      tmpHdfsConfiguration = new Configuration(false)
+      tmpHdfsConfiguration.readFields(inputStream)
+      inputStream.close()
+    }
+
+    if (tmpHdfsConfiguration == null) {
+      try {
+        tmpHdfsConfiguration = configBroadcast.value.value
+      } catch {
+        case ex: Exception => logError("Unable to getConfig from broadcast", ex)
+      }
+    }
+    tmpHdfsConfiguration
+  }
+
+  /**
+   *  underlying wrapper for all mapPartition functions in HBaseContext
+   *
+   */
+  private def hbaseMapPartition[K, U](
+                                       configBroadcast:
+                                       Broadcast[SerializableWritable[Configuration]],
+                                       it: Iterator[K],
+                                       mp: (Iterator[K], Connection) =>
+                                         Iterator[U]): Iterator[U] = {
+
+    val config = getConf(configBroadcast)
+    applyCreds
+
+    val smartConn = HBaseConnectionCache.getConnection(config)
+    val res = mp(it, smartConn.connection)
+    smartConn.close()
+    res
+  }
+
+  /**
+   *  underlying wrapper for all get mapPartition functions in HBaseContext
+   */
+  private class GetMapPartition[T, U](tableName: TableName,
+                                      batchSize: Integer,
+                                      makeGet: (T) => Get,
+                                      convertResult: (Result) => U)
+    extends Serializable {
+
+    val tName = tableName.getName
+
+    def run(iterator: Iterator[T], connection: Connection): Iterator[U] = {
+      val table = connection.getTable(TableName.valueOf(tName))
+
+      val gets = new java.util.ArrayList[Get]()
+      var res = List[U]()
+
+      while (iterator.hasNext) {
+        gets.add(makeGet(iterator.next()))
+
+        if (gets.size() == batchSize) {
+          val results = table.get(gets)
+          res = res ++ results.map(convertResult)
+          gets.clear()
+        }
+      }
+      if (gets.size() > 0) {
+        val results = table.get(gets)
+        res = res ++ results.map(convertResult)
+        gets.clear()
+      }
+      table.close()
+      res.iterator
+    }
+  }
+
+  /**
+   * Produces a ClassTag[T], which is actually just a casted ClassTag[AnyRef].
+   *
+   * This method is used to keep ClassTags out of the external Java API, as
+   * the Java compiler cannot produce them automatically. While this
+   * ClassTag-faking does please the compiler, it can cause problems at runtime
+   * if the Scala API relies on ClassTags for correctness.
+   *
+   * Often, though, a ClassTag[AnyRef] will not lead to incorrect behavior,
+   * just worse performance or security issues.
+   * For instance, an Array of AnyRef can hold any type T, but may lose primitive
+   * specialization.
+   */
+  private[spark]
+  def fakeClassTag[T]: ClassTag[T] = ClassTag.AnyRef.asInstanceOf[ClassTag[T]]
+
+  /**
+   * Spark Implementation of HBase Bulk load for wide rows or when
+   * values are not already combined at the time of the map process
+   *
+   * This will take the content from an existing RDD then sort and shuffle
+   * it with respect to region splits.  The result of that sort and shuffle
+   * will be written to HFiles.
+   *
+   * After this function is executed the user will have to call
+   * LoadIncrementalHFiles.doBulkLoad(...) to move the files into HBase
+   *
+   * Also note this version of bulk load is different from past versions in
+   * that it includes the qualifier as part of the sort process. The
+   * reason for this is to be able to support rows with a very large number
+   * of columns.
+   *
+   * @param rdd                            The RDD we are bulk loading from
+   * @param tableName                      The HBase table we are loading into
+   * @param flatMap                        A flatMap function that will make every
+   *                                       row in the RDD
+   *                                       into N cells for the bulk load
+   * @param stagingDir                     The location on the FileSystem to bulk load into
+   * @param familyHFileWriteOptionsMap     Options that will define how the HFile for a
+   *                                       column family is written
+   * @param compactionExclude              Compaction excluded for the HFiles
+   * @param maxSize                        Max size for the HFiles before they roll
+   * @tparam T                             The Type of values in the original RDD
+   */
+  def bulkLoad[T](rdd:RDD[T],
+                  tableName: TableName,
+                  flatMap: (T) => Iterator[(KeyFamilyQualifier, Array[Byte])],
+                  stagingDir:String,
+                  familyHFileWriteOptionsMap:
+                  util.Map[Array[Byte], FamilyHFileWriteOptions] =
+                  new util.HashMap[Array[Byte], FamilyHFileWriteOptions],
+                  compactionExclude: Boolean = false,
+                  maxSize:Long = HConstants.DEFAULT_MAX_FILE_SIZE):
+  Unit = {
+    val stagingPath = new Path(stagingDir)
+    val fs = stagingPath.getFileSystem(config)
+    if (fs.exists(stagingPath)) {
+      throw new FileAlreadyExistsException("Path " + stagingDir + " already exists")
+    }
+    val conn = HBaseConnectionCache.getConnection(config)
+    try {
+      val regionLocator = conn.getRegionLocator(tableName)
+      val startKeys = regionLocator.getStartKeys
+      if (startKeys.length == 0) {
+        logInfo("Table " + tableName.toString + " was not found")
+      }
+      val defaultCompressionStr = config.get("hfile.compression",
+        Compression.Algorithm.NONE.getName)
+      val hfileCompression = HFileWriterImpl
+        .compressionByName(defaultCompressionStr)
+      val nowTimeStamp = System.currentTimeMillis()
+      val tableRawName = tableName.getName
+
+      val familyHFileWriteOptionsMapInternal =
+        new util.HashMap[ByteArrayWrapper, FamilyHFileWriteOptions]
+
+      val entrySetIt = familyHFileWriteOptionsMap.entrySet().iterator()
+
+      while (entrySetIt.hasNext) {
+        val entry = entrySetIt.next()
+        familyHFileWriteOptionsMapInternal.put(new ByteArrayWrapper(entry.getKey), entry.getValue)
+      }
+
+      val regionSplitPartitioner =
+        new BulkLoadPartitioner(startKeys)
+
+      //This is where all the magic happens
+      //Here we are going to do the following things
+      // 1. FlatMap every row in the RDD into key column value tuples
+      // 2. Then we are going to repartition sort and shuffle
+      // 3. Finally we are going to write out our HFiles
+      rdd.flatMap( r => flatMap(r)).
+        repartitionAndSortWithinPartitions(regionSplitPartitioner).
+        hbaseForeachPartition(this, (it, conn) => {
+
+          val conf = broadcastedConf.value.value
+          val fs = FileSystem.get(conf)
+          val writerMap = new mutable.HashMap[ByteArrayWrapper, WriterLength]
+          var previousRow:Array[Byte] = HConstants.EMPTY_BYTE_ARRAY
+          var rollOverRequested = false
+          val localTableName = TableName.valueOf(tableRawName)
+
+          //Here is where we finally iterate through the data in this partition of the
+          //RDD that has been sorted and partitioned
+          it.foreach{ case (keyFamilyQualifier, cellValue:Array[Byte]) =>
+
+            val wl = writeValueToHFile(keyFamilyQualifier.rowKey,
+              keyFamilyQualifier.family,
+              keyFamilyQualifier.qualifier,
+              cellValue,
+              nowTimeStamp,
+              fs,
+              conn,
+              localTableName,
+              conf,
+              familyHFileWriteOptionsMapInternal,
+              hfileCompression,
+              writerMap,
+              stagingDir)
+
+            rollOverRequested = rollOverRequested || wl.written > maxSize
+
+            //This will only roll if we have at least one column family file that is
+            //bigger than maxSize and we have finished a given row key
+            if (rollOverRequested && Bytes.compareTo(previousRow, keyFamilyQualifier.rowKey) != 0) {
+              rollWriters(fs, writerMap,
+                regionSplitPartitioner,
+                previousRow,
+                compactionExclude)
+              rollOverRequested = false
+            }
+
+            previousRow = keyFamilyQualifier.rowKey
+          }
+          //We have finished all the data so let's close up the writers
+          rollWriters(fs, writerMap,
+            regionSplitPartitioner,
+            previousRow,
+            compactionExclude)
+          rollOverRequested = false
+        })
+    } finally {
+      if(null != conn) conn.close()
+    }
+  }
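
A hedged sketch of calling bulkLoad, assuming an RDD of (rowKey, family, qualifier, value) byte-array tuples named cellRdd, and assuming KeyFamilyQualifier (added elsewhere in this patch) is constructed as (rowKey, family, qualifier); the table name and staging path are illustrative:

    import org.apache.hadoop.hbase.TableName
    import org.apache.hadoop.hbase.spark.KeyFamilyQualifier

    // cellRdd: RDD[(Array[Byte], Array[Byte], Array[Byte], Array[Byte])] (assumed)
    hbaseContext.bulkLoad[(Array[Byte], Array[Byte], Array[Byte], Array[Byte])](
      cellRdd,
      TableName.valueOf("t1"),
      t => Iterator((new KeyFamilyQualifier(t._1, t._2, t._3), t._4)),  // assumed ctor order
      "/tmp/t1-bulkload-staging")
    // Once the HFiles are written under the staging dir, finish with
    // LoadIncrementalHFiles.doBulkLoad(...) as described in the scaladoc above.
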
+
+  /**
+   * Spark Implementation of HBase Bulk load for short rows, somewhere under
+   * 1000 columns.  This bulk load should be faster for tables with thinner
+   * rows than the other Spark implementation of bulk load that puts only one
+   * value into a record going into a shuffle
+   *
+   * This will take the content from an existing RDD then sort and shuffle
+   * it with respect to region splits.  The result of that sort and shuffle
+   * will be written to HFiles.
+   *
+   * After this function is executed the user will have to call
+   * LoadIncrementalHFiles.doBulkLoad(...) to move the files into HBase
+   *
+   * In this implementation, only the rowKey is given to the shuffle as the key
+   * and all the columns are already linked to the RowKey before the shuffle
+   * stage.  The sorting of the qualifier is done in memory outside of the
+   * shuffle stage
+   *
+   * Also make sure that incoming RDDs only have one record for every row key.
+   *
+   * @param rdd                            The RDD we are bulk loading from
+   * @param tableName                      The HBase table we are loading into
+   * @param mapFunction                    A function that will convert the RDD records to
+   *                                       the key value format used for the shuffle to prep
+   *                                       for writing to the bulk loaded HFiles
+   * @param stagingDir                     The location on the FileSystem to bulk load into
+   * @param familyHFileWriteOptionsMap     Options that will define how the HFile for a
+   *                                       column family is written
+   * @param compactionExclude              Compaction excluded for the HFiles
+   * @param maxSize                        Max size for the HFiles before they roll
+   * @tparam T                             The Type of values in the original RDD
+   */
+  def bulkLoadThinRows[T](rdd:RDD[T],
+                  tableName: TableName,
+                  mapFunction: (T) =>
+                    (ByteArrayWrapper, FamiliesQualifiersValues),
+                  stagingDir:String,
+                  familyHFileWriteOptionsMap:
+                  util.Map[Array[Byte], FamilyHFileWriteOptions] =
+                  new util.HashMap[Array[Byte], FamilyHFileWriteOptions],
+                  compactionExclude: Boolean = false,
+                  maxSize:Long = HConstants.DEFAULT_MAX_FILE_SIZE):
+  Unit = {
+    val stagingPath = new Path(stagingDir)
+    val fs = stagingPath.getFileSystem(config)
+    if (fs.exists(stagingPath)) {
+      throw new FileAlreadyExistsException("Path " + stagingDir + " already exists")
+    }
+    val conn = HBaseConnectionCache.getConnection(config)
+    try {
+      val regionLocator = conn.getRegionLocator(tableName)
+      val startKeys = regionLocator.getStartKeys
+      if (startKeys.length == 0) {
+        logInfo("Table " + tableName.toString + " was not found")
+      }
+      val defaultCompressionStr = config.get("hfile.compression",
+        Compression.Algorithm.NONE.getName)
+      val defaultCompression = HFileWriterImpl
+        .compressionByName(defaultCompressionStr)
+      val nowTimeStamp = System.currentTimeMillis()
+      val tableRawName = tableName.getName
+
+      val familyHFileWriteOptionsMapInternal =
+        new util.HashMap[ByteArrayWrapper, FamilyHFileWriteOptions]
+
+      val entrySetIt = familyHFileWriteOptionsMap.entrySet().iterator()
+
+      while (entrySetIt.hasNext) {
+        val entry = entrySetIt.next()
+        familyHFileWriteOptionsMapInternal.put(new ByteArrayWrapper(entry.getKey), entry.getValue)
+      }
+
+      val regionSplitPartitioner =
+        new BulkLoadPartitioner(startKeys)
+
+      //This is where all the magic happens
+      //Here we are going to do the following things
+      // 1. Map every row in the RDD into key column value tuples
+      // 2. Then we are going to repartition sort and shuffle
+      // 3. Finally we are going to write out our HFiles
+      rdd.map( r => mapFunction(r)).
+        repartitionAndSortWithinPartitions(regionSplitPartitioner).
+        hbaseForeachPartition(this, (it, conn) => {
+
+          val conf = broadcastedConf.value.value
+          val fs = FileSystem.get(conf)
+          val writerMap = new mutable.HashMap[ByteArrayWrapper, WriterLength]
+          var previousRow:Array[Byte] = HConstants.EMPTY_BYTE_ARRAY
+          var rollOverRequested = false
+          val localTableName = TableName.valueOf(tableRawName)
+
+          //Here is where we finally iterate through the data in this partition of the
+          //RDD that has been sorted and partitioned
+          it.foreach{ case (rowKey:ByteArrayWrapper,
+          familiesQualifiersValues:FamiliesQualifiersValues) =>
+
+
+            if (Bytes.compareTo(previousRow, rowKey.value) == 0) {
+              throw new KeyAlreadyExistsException("The following key was sent to the " +
+                "HFile load more than once: " + Bytes.toString(previousRow))
+            }
+
+            //The family map is a tree map so the families will be sorted
+            val familyIt = familiesQualifiersValues.familyMap.entrySet().iterator()
+            while (familyIt.hasNext) {
+              val familyEntry = familyIt.next()
+
+              val family = familyEntry.getKey.value
+
+              val qualifierIt = familyEntry.getValue.entrySet().iterator()
+
+              //The qualifier map is a tree map so the qualifiers will be sorted
+              while (qualifierIt.hasNext) {
+
+                val qualifierEntry = qualifierIt.next()
+                val qualifier = qualifierEntry.getKey
+                val cellValue = qualifierEntry.getValue
+
+                writeValueToHFile(rowKey.value,
+                  family,
+                  qualifier.value, // qualifier
+                  cellValue, // value
+                  nowTimeStamp,
+                  fs,
+                  conn,
+                  localTableName,
+                  conf,
+                  familyHFileWriteOptionsMapInternal,
+                  defaultCompression,
+                  writerMap,
+                  stagingDir)
+
+                previousRow = rowKey.value
+              }
+
+              writerMap.values.foreach( wl => {
+                rollOverRequested = rollOverRequested || wl.written > maxSize
+
+                //This will only roll if we have at least one column family file that is
+                //bigger than maxSize and we have finished a given row key
+                if (rollOverRequested) {
+                  rollWriters(fs, writerMap,
+                    regionSplitPartitioner,
+                    previousRow,
+                    compactionExclude)
+                  rollOverRequested = false
+                }
+              })
+            }
+          }
+
+          //We have finished all the data so let's close up the writers
+          rollWriters(fs, writerMap,
+            regionSplitPartitioner,
+            previousRow,
+            compactionExclude)
+          rollOverRequested = false
+        })
+    } finally {
+      if(null != conn) conn.close()
+    }
+  }
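
A hedged sketch of the thin-row variant, assuming one RDD record per row key carrying its (family, qualifier, value) cells, and assuming FamiliesQualifiersValues (added elsewhere in this patch) can be populated with a += (family, qualifier, value) call; the names and staging path are illustrative:

    import org.apache.hadoop.hbase.TableName
    import org.apache.hadoop.hbase.spark.{ByteArrayWrapper, FamiliesQualifiersValues}

    // rowRdd: RDD[(Array[Byte], Seq[(Array[Byte], Array[Byte], Array[Byte])])] (assumed)
    hbaseContext.bulkLoadThinRows[(Array[Byte], Seq[(Array[Byte], Array[Byte], Array[Byte])])](
      rowRdd,
      TableName.valueOf("t1"),
      row => {
        val familyQualifiersValues = new FamiliesQualifiersValues
        row._2.foreach { case (family, qualifier, value) =>
          familyQualifiersValues += (family, qualifier, value)  // assumed API
        }
        (new ByteArrayWrapper(row._1), familyQualifiersValues)
      },
      "/tmp/t1-thinrows-staging")
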
+
+  /**
+   *  This will return a new HFile writer when requested
+   *
+   * @param family       column family
+   * @param conf         configuration to connect to HBase
+   * @param favoredNodes nodes that we would like to write to
+   * @param fs           FileSystem object where we will be writing the HFiles to
+   * @return WriterLength object
+   */
+  private def getNewHFileWriter(family: Array[Byte], conf: Configuration,
+                   favoredNodes: Array[InetSocketAddress],
+                   fs:FileSystem,
+                   familydir:Path,
+                   familyHFileWriteOptionsMapInternal:
+                   util.HashMap[ByteArrayWrapper, FamilyHFileWriteOptions],
+                   defaultCompression:Compression.Algorithm): WriterLength = {
+
+
+    var familyOptions = familyHFileWriteOptionsMapInternal.get(new ByteArrayWrapper(family))
+
+    if (familyOptions == null) {
+      familyOptions = new FamilyHFileWriteOptions(defaultCompression.toString,
+        BloomType.NONE.toString, HConstants.DEFAULT_BLOCKSIZE, DataBlockEncoding.NONE.toString)
+      familyHFileWriteOptionsMapInternal.put(new ByteArrayWrapper(family), familyOptions)
+    }
+
+    val tempConf = new Configuration(conf)
+    tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f)
+    val contextBuilder = new HFileContextBuilder()
+      .withCompression(Algorithm.valueOf(familyOptions.compression))
+      .withChecksumType(HStore.getChecksumType(conf))
+      .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
+      .withBlockSize(familyOptions.blockSize)
+
+    if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
+      contextBuilder.withIncludesTags(true)
+    }
+
+    contextBuilder.withDataBlockEncoding(DataBlockEncoding.
+      valueOf(familyOptions.dataBlockEncoding))
+    val hFileContext = contextBuilder.build()
+
+    //Add a '_' to the file name because this is an unfinished file.  A rename will happen
+    // to remove the '_' when the file is closed.
+    new WriterLength(0,
+      new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs))
+        .withBloomType(BloomType.valueOf(familyOptions.bloomType))
+        .withComparator(CellComparator.getInstance()).withFileContext(hFileContext)
+        .withFilePath(new Path(familydir, "_" + UUID.randomUUID.toString.replaceAll("-", "")))
+        .withFavoredNodes(favoredNodes).build())
+
+  }
+
+  /**
+   * Encompasses the logic to write a value to an HFile
+   *
+   * @param rowKey                             The RowKey for the record
+   * @param family                             HBase column family for the record
+   * @param qualifier                          HBase column qualifier for the record
+   * @param cellValue                          HBase cell value
+   * @param nowTimeStamp                       The cell time stamp
+   * @param fs                                 Connection to the FileSystem for the HFile
+   * @param conn                               Connection to HBase
+   * @param tableName                          HBase TableName object
+   * @param conf                               Configuration to be used when making a new HFile
+   * @param familyHFileWriteOptionsMapInternal Extra configs for the HFile
+   * @param hfileCompression                   The compression codec for the new HFile
+   * @param writerMap                          HashMap of existing writers and their offsets
+   * @param stagingDir                         The staging directory on the FileSystem to store
+   *                                           the HFiles
+   * @return                                   The writer for the given HFile that was
+   *                                           written to
+   */
+  private def writeValueToHFile(rowKey: Array[Byte],
+                        family: Array[Byte],
+                        qualifier: Array[Byte],
+                        cellValue:Array[Byte],
+                        nowTimeStamp: Long,
+                        fs: FileSystem,
+                        conn: Connection,
+                        tableName: TableName,
+                        conf: Configuration,
+                        familyHFileWriteOptionsMapInternal:
+                        util.HashMap[ByteArrayWrapper, FamilyHFileWriteOptions],
+                        hfileCompression:Compression.Algorithm,
+                        writerMap:mutable.HashMap[ByteArrayWrapper, WriterLength],
+                        stagingDir: String
+                         ): WriterLength = {
+
+    val wl = writerMap.getOrElseUpdate(new ByteArrayWrapper(family), {
+      val familyDir = new Path(stagingDir, Bytes.toString(family))
+
+      fs.mkdirs(familyDir)
+
+      val loc:HRegionLocation = {
+        try {
+          val locator =
+            conn.getRegionLocator(tableName)
+          locator.getRegionLocation(rowKey)
+        } catch {
+          case e: Throwable =>
+            logWarning("there's something wrong when locating rowkey: " +
+              Bytes.toString(rowKey))
+            null
+        }
+      }
+      if (null == loc) {
+        if (log.isTraceEnabled) {
+          logTrace("failed to get region location, so use default writer: " +
+            Bytes.toString(rowKey))
+        }
+        getNewHFileWriter(family = family,
+          conf = conf,
+          favoredNodes = null,
+          fs = fs,
+          familydir = familyDir,
+          familyHFileWriteOptionsMapInternal,
+          hfileCompression)
+      } else {
+        if (log.isDebugEnabled) {
+          logDebug("first rowkey: [" + Bytes.toString(rowKey) + "]")
+        }
+        val initialIsa =
+          new InetSocketAddress(loc.getHostname, loc.getPort)
+        if (initialIsa.isUnresolved) {
+          if (log.isTraceEnabled) {
+            logTrace("failed to resolve bind address: " + loc.getHostname + ":"
+              + loc.getPort + ", so use default writer")
+          }
+          getNewHFileWriter(family,
+            conf,
+            null,
+            fs,
+            familyDir,
+            familyHFileWriteOptionsMapInternal,
+            hfileCompression)
+        } else {
+          if(log.isDebugEnabled) {
+            logDebug("use favored nodes writer: " + initialIsa.getHostString)
+          }
+          getNewHFileWriter(family,
+            conf,
+            Array[InetSocketAddress](initialIsa),
+            fs,
+            familyDir,
+            familyHFileWriteOptionsMapInternal,
+            hfileCompression)
+        }
+      }
+    })
+
+    val keyValue = new KeyValue(rowKey,
+      family,
+      qualifier,
+      nowTimeStamp, cellValue)
+
+    wl.writer.append(keyValue)
+    wl.written += keyValue.getLength
+
+    wl
+  }
+
+  /**
+   * This will roll all Writers
+   * @param fs                     Hadoop FileSystem object
+   * @param writerMap              HashMap that contains all the writers
+   * @param regionSplitPartitioner The partitioner with knowledge of how the
+   *                               Regions are split by row key
+   * @param previousRow            The last row to fill the HFile ending range metadata
+   * @param compactionExclude      The exclude compaction metadata flag for the HFile
+   */
+  private def rollWriters(fs:FileSystem,
+                          writerMap:mutable.HashMap[ByteArrayWrapper, WriterLength],
+                  regionSplitPartitioner: BulkLoadPartitioner,
+                  previousRow: Array[Byte],
+                  compactionExclude: Boolean): Unit = {
+    writerMap.values.foreach( wl => {
+      if (wl.writer != null) {
+        logDebug("Writer=" + wl.writer.getPath +
+          (if (wl.written == 0) "" else ", wrote=" + wl.written))
+        closeHFileWriter(fs, wl.writer,
+          regionSplitPartitioner,
+          previousRow,
+          compactionExclude)
+      }
+    })
+    writerMap.clear()
+
+  }
+
+  /**
+   * Function to close an HFile
+   * @param fs                     Hadoop FileSystem object
+   * @param w                      HFile Writer
+   * @param regionSplitPartitioner The partitioner with knowledge of how the
+   *                               Regions are split by row key
+   * @param previousRow            The last row to fill the HFile ending range metadata
+   * @param compactionExclude      The exclude compaction metadata flag for the HFile
+   */
+  private def closeHFileWriter(fs:FileSystem,
+                               w: StoreFileWriter,
+                               regionSplitPartitioner: BulkLoadPartitioner,
+                               previousRow: Array[Byte],
+                               compactionExclude: Boolean): Unit = {
+    if (w != null) {
+      w.appendFileInfo(HStoreFile.BULKLOAD_TIME_KEY,
+        Bytes.toBytes(System.currentTimeMillis()))
+      w.appendFileInfo(HStoreFile.BULKLOAD_TASK_KEY,
+        Bytes.toBytes(regionSplitPartitioner.getPartition(previousRow)))
+      w.appendFileInfo(HStoreFile.MAJOR_COMPACTION_KEY,
+        Bytes.toBytes(true))
+      w.appendFileInfo(HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,
+        Bytes.toBytes(compactionExclude))
+      w.appendTrackedTimestampsToMetadata()
+      w.close()
+
+      val srcPath = w.getPath
+
+      //In the new path you will see that we are using substring.  This is to
+      // remove the '_' character in front of the HFile name.  '_' is a character
+      // that will tell HBase that this file shouldn't be included in the bulk load.
+      // This feature is to protect against unfinished HFiles being submitted to HBase
+      val newPath = new Path(w.getPath.getParent, w.getPath.getName.substring(1))
+      if (!fs.rename(srcPath, newPath)) {
+        throw new IOException("Unable to rename '" + srcPath +
+          "' to " + newPath)
+      }
+    }
+  }
+
+  /**
+   * This is a wrapper class around StoreFileWriter.  The reason for the
+   * wrapper is to keep the length of the file alongside the writer
+   *
+   * @param written The number of bytes written to the writer
+   * @param writer  The writer to be wrapped
+   */
+  class WriterLength(var written:Long, val writer:StoreFileWriter)
+}
+
+@InterfaceAudience.Private
+object LatestHBaseContextCache {
+  var latest:HBaseContext = null
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseDStreamFunctions.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseDStreamFunctions.scala
new file mode 100644 (file)
index 0000000..4edde44
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark
+
+import org.apache.hadoop.hbase.TableName
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client._
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.spark.streaming.dstream.DStream
+
+import scala.reflect.ClassTag
+
+/**
+ * HBaseDStreamFunctions contains a set of implicit functions that can be
+ * applied to a Spark DStream so that we can easily interact with HBase
+ */
+@InterfaceAudience.Public
+object HBaseDStreamFunctions {
+
+  /**
+   * These are implicit methods for a DStream that contains any type of
+   * data.
+   *
+   * @param dStream  This is for dStreams of any type
+   * @tparam T       Type T
+   */
+  implicit class GenericHBaseDStreamFunctions[T](val dStream: DStream[T]) {
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * put.  This will not return a new DStream.  Think of it like a foreach
+     *
+     * @param hc         The hbaseContext object to identify which
+     *                   HBase cluster connection to use
+     * @param tableName  The tableName that the put will be sent to
+     * @param f          The function that will turn the DStream values
+     *                   into HBase Put objects.
+     */
+    def hbaseBulkPut(hc: HBaseContext,
+                     tableName: TableName,
+                     f: (T) => Put): Unit = {
+      hc.streamBulkPut(dStream, tableName, f)
+    }
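
As a usage sketch, assuming a DStream of (rowKey, value) byte-array pairs named pairDStream, an existing HBaseContext hbaseContext, and an illustrative table "t1" with column family "c":

    import org.apache.hadoop.hbase.spark.HBaseDStreamFunctions._
    import org.apache.hadoop.hbase.TableName
    import org.apache.hadoop.hbase.client.Put
    import org.apache.hadoop.hbase.util.Bytes

    // pairDStream: DStream[(Array[Byte], Array[Byte])] (assumed)
    pairDStream.hbaseBulkPut(
      hbaseContext,
      TableName.valueOf("t1"),
      (kv: (Array[Byte], Array[Byte])) =>
        new Put(kv._1).addColumn(Bytes.toBytes("c"), Bytes.toBytes("q"), kv._2))
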
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * get.  This will return a new DStream.  Think about it as a DStream map
+     * function, in that every DStream value will get a new value out of
+     * HBase.  That new value will populate the newly generated DStream.
+     *
+     * @param hc             The hbaseContext object to identify which
+     *                       HBase cluster connection to use
+     * @param tableName      The tableName that the Gets will be sent to
+     * @param batchSize      How many gets to execute in a single batch
+     * @param f              The function that will turn the DStream values
+     *                       into HBase Get objects
+     * @param convertResult  The function that will convert a HBase
+     *                       Result object into a value that will go
+     *                       into the resulting DStream
+     * @tparam R             The type of Object that will be coming
+     *                       out of the resulting DStream
+     * @return               A resulting DStream with type R objects
+     */
+    def hbaseBulkGet[R: ClassTag](hc: HBaseContext,
+                     tableName: TableName,
+                     batchSize:Int, f: (T) => Get, convertResult: (Result) => R):
+    DStream[R] = {
+      hc.streamBulkGet[T, R](tableName, batchSize, dStream, f, convertResult)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * get.  This will return a new DStream.  Think about it as a DStream map
+     * function, in that every DStream value will get a new value out of
+     * HBase.  That new value will populate the newly generated DStream.
+     *
+     * @param hc             The hbaseContext object to identify which
+     *                       HBase cluster connection to use
+     * @param tableName      The tableName that the Gets will be sent to
+     * @param batchSize      How many gets to execute in a single batch
+     * @param f              The function that will turn the DStream values
+     *                       into HBase Get objects
+     * @return               A resulting DStream with type R objects
+     */
+    def hbaseBulkGet(hc: HBaseContext,
+                     tableName: TableName, batchSize:Int,
+                     f: (T) => Get): DStream[(ImmutableBytesWritable, Result)] = {
+        hc.streamBulkGet[T, (ImmutableBytesWritable, Result)](
+          tableName, batchSize, dStream, f,
+          result => (new ImmutableBytesWritable(result.getRow), result))
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * Delete.  This will not return a new DStream.
+     *
+     * @param hc         The hbaseContext object to identify which HBase
+     *                   cluster connection to use
+     * @param tableName  The tableName that the deletes will be sent to
+     * @param f          The function that will convert the DStream value into
+     *                   a HBase Delete Object
+     * @param batchSize  The number of Deletes to be sent in a single batch
+     */
+    def hbaseBulkDelete(hc: HBaseContext,
+                        tableName: TableName,
+                        f:(T) => Delete, batchSize:Int): Unit = {
+      hc.streamBulkDelete(dStream, tableName, f, batchSize)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's
+     * foreachPartition method.  This will act very much like a normal DStream
+     * foreach method but for the fact that you will now have a HBase connection
+     * while iterating through the values.
+     *
+     * @param hc  The hbaseContext object to identify which HBase
+     *            cluster connection to use
+     * @param f   This function will get an iterator for a Partition of an
+     *            DStream along with a connection object to HBase
+     */
+    def hbaseForeachPartition(hc: HBaseContext,
+                              f: (Iterator[T], Connection) => Unit): Unit = {
+      hc.streamForeachPartition(dStream, f)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's
+     * mapPartitions method.  This will act very much like a normal DStream
+     * map partitions method but for the fact that you will now have a
+     * HBase connection while iterating through the values
+     *
+     * @param hc  The hbaseContext object to identify which HBase
+     *            cluster connection to use
+     * @param f   This function will get an iterator for a Partition of an
+     *            DStream along with a connection object to HBase
+     * @tparam R  This is the type of objects that will go into the resulting
+     *            DStream
+     * @return    A resulting DStream of type R
+     */
+    def hbaseMapPartitions[R: ClassTag](hc: HBaseContext,
+                                        f: (Iterator[T], Connection) => Iterator[R]):
+    DStream[R] = {
+      hc.streamMapPartitions(dStream, f)
+    }
+  }
+}
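
Similarly, a minimal sketch of the streaming bulk delete above, assuming an HBaseContext hbaseContext, a DStream[Array[Byte]] of row keys rowKeyDStream, and an illustrative table "t1":

    import org.apache.hadoop.hbase.spark.HBaseDStreamFunctions._
    import org.apache.hadoop.hbase.TableName
    import org.apache.hadoop.hbase.client.Delete

    // Delete every row key that arrives on the stream, 10 Deletes per batch.
    rowKeyDStream.hbaseBulkDelete(
      hbaseContext,
      TableName.valueOf("t1"),
      (rowKey: Array[Byte]) => new Delete(rowKey),
      10)
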
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseRDDFunctions.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseRDDFunctions.scala
new file mode 100644 (file)
index 0000000..2469c8e
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.util
+
+import org.apache.hadoop.hbase.{HConstants, TableName}
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client._
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.spark.rdd.RDD
+
+import scala.reflect.ClassTag
+
+/**
+ * HBaseRDDFunctions contains a set of implicit functions that can be
+ * applied to a Spark RDD so that we can easily interact with HBase
+ */
+@InterfaceAudience.Public
+object HBaseRDDFunctions
+{
+
+  /**
+   * These are implicit methods for a RDD that contains any type of
+   * data.
+   *
+   * @param rdd This is for rdd of any type
+   * @tparam T  This is any type
+   */
+  implicit class GenericHBaseRDDFunctions[T](val rdd: RDD[T]) {
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * put.  This will not return a new RDD.  Think of it like a foreach
+     *
+     * @param hc         The hbaseContext object to identify which
+     *                   HBase cluster connection to use
+     * @param tableName  The tableName that the put will be sent to
+     * @param f          The function that will turn the RDD values
+     *                   into HBase Put objects.
+     */
+    def hbaseBulkPut(hc: HBaseContext,
+                     tableName: TableName,
+                     f: (T) => Put): Unit = {
+      hc.bulkPut(rdd, tableName, f)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * get.  This will return a new RDD.  Think about it as a RDD map
+     * function, in that every RDD value will get a new value out of
+     * HBase.  That new value will populate the newly generated RDD.
+     *
+     * @param hc             The hbaseContext object to identify which
+     *                       HBase cluster connection to use
+     * @param tableName      The tableName that the Gets will be sent to
+     * @param batchSize      How many gets to execute in a single batch
+     * @param f              The function that will turn the RDD values
+     *                       into HBase Get objects
+     * @param convertResult  The function that will convert a HBase
+     *                       Result object into a value that will go
+     *                       into the resulting RDD
+     * @tparam R             The type of Object that will be coming
+     *                       out of the resulting RDD
+     * @return               A resulting RDD with type R objects
+     */
+    def hbaseBulkGet[R: ClassTag](hc: HBaseContext,
+                            tableName: TableName, batchSize:Int,
+                            f: (T) => Get, convertResult: (Result) => R): RDD[R] = {
+      hc.bulkGet[T, R](tableName, batchSize, rdd, f, convertResult)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * get.  This will return a new RDD.  Think about it as a RDD map
+     * function, in that every RDD value will get a new value out of
+     * HBase.  That new value will populate the newly generated RDD.
+     *
+     * @param hc             The hbaseContext object to identify which
+     *                       HBase cluster connection to use
+     * @param tableName      The tableName that the Gets will be sent to
+     * @param batchSize      How many gets to execute in a single batch
+     * @param f              The function that will turn the RDD values
+     *                       into HBase Get objects
+     * @return               A resulting RDD with type R objects
+     */
+    def hbaseBulkGet(hc: HBaseContext,
+                                  tableName: TableName, batchSize:Int,
+                                  f: (T) => Get): RDD[(ImmutableBytesWritable, Result)] = {
+      hc.bulkGet[T, (ImmutableBytesWritable, Result)](tableName,
+        batchSize, rdd, f,
+        result => if (result != null && result.getRow != null) {
+          (new ImmutableBytesWritable(result.getRow), result)
+        } else {
+          null
+        })
+    }
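
A minimal sketch of this overload, assuming an HBaseContext hbaseContext, an RDD[Array[Byte]] of row keys rowKeyRdd, and an illustrative table "t1":

    import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
    import org.apache.hadoop.hbase.TableName
    import org.apache.hadoop.hbase.client.Get
    import org.apache.hadoop.hbase.util.Bytes

    val getRdd = rowKeyRdd.hbaseBulkGet(
      hbaseContext,
      TableName.valueOf("t1"),
      10,
      (rowKey: Array[Byte]) => new Get(rowKey))

    // Rows that do not exist come back as null elements (see convertResult above),
    // so filter them out before using the results.
    getRdd.filter(_ != null).foreach { case (_, result) =>
      println(Bytes.toString(result.getRow))
    }
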
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's bulk
+     * Delete.  This will not return a new RDD.
+     *
+     * @param hc         The hbaseContext object to identify which HBase
+     *                   cluster connection to use
+     * @param tableName  The tableName that the deletes will be sent to
+     * @param f          The function that will convert the RDD value into
+     *                   a HBase Delete Object
+     * @param batchSize  The number of Deletes to be sent in a single batch
+     */
+    def hbaseBulkDelete(hc: HBaseContext,
+                        tableName: TableName, f:(T) => Delete, batchSize:Int): Unit = {
+      hc.bulkDelete(rdd, tableName, f, batchSize)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's
+     * foreachPartition method.  This will act very much like a normal RDD
+     * foreach method but for the fact that you will now have a HBase connection
+     * while iterating through the values.
+     *
+     * @param hc  The hbaseContext object to identify which HBase
+     *            cluster connection to use
+     * @param f   This function will get an iterator for a Partition of an
+     *            RDD along with a connection object to HBase
+     */
+    def hbaseForeachPartition(hc: HBaseContext,
+                              f: (Iterator[T], Connection) => Unit): Unit = {
+      hc.foreachPartition(rdd, f)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's
+     * mapPartitions method.  This will act very much like a normal RDD
+     * map partitions method but for the fact that you will now have a
+     * HBase connection while iterating through the values
+     *
+     * @param hc  The hbaseContext object to identify which HBase
+     *            cluster connection to use
+     * @param f   This function will get an iterator for a Partition of an
+     *            RDD along with a connection object to HBase
+     * @tparam R  This is the type of objects that will go into the resulting
+     *            RDD
+     * @return    A resulting RDD of type R
+     */
+    def hbaseMapPartitions[R: ClassTag](hc: HBaseContext,
+                                        f: (Iterator[T], Connection) => Iterator[R]):
+    RDD[R] = {
+      hc.mapPartitions[T,R](rdd, f)
+    }
+
+    /**
+     * Spark Implementation of HBase Bulk load for wide rows or when
+     * values are not already combined at the time of the map process
+     *
+     * A Spark Implementation of HBase Bulk load
+     *
+     * This will take the content from an existing RDD then sort and shuffle
+     * it with respect to region splits.  The result of that sort and shuffle
+     * will be written to HFiles.
+     *
+     * After this function is executed the user will have to call
+     * LoadIncrementalHFiles.doBulkLoad(...) to move the files into HBase
+     *
+     * Also note this version of bulk load is different from past versions in
+     * that it includes the qualifier as part of the sort process. The
+     * reason for this is to be able to support rows with a very large number
+     * of columns.
+     *
+     * @param tableName                      The HBase table we are loading into
+     * @param flatMap                        A flatMap function that will make every row in the RDD
+     *                                       into N cells for the bulk load
+     * @param stagingDir                     The location on the FileSystem to bulk load into
+     * @param familyHFileWriteOptionsMap     Options that will define how the HFile for a
+     *                                       column family is written
+     * @param compactionExclude              Compaction excluded for the HFiles
+     * @param maxSize                        Max size for the HFiles before they roll
+     */
+    def hbaseBulkLoad(hc: HBaseContext,
+                         tableName: TableName,
+                         flatMap: (T) => Iterator[(KeyFamilyQualifier, Array[Byte])],
+                         stagingDir:String,
+                         familyHFileWriteOptionsMap:
+                         util.Map[Array[Byte], FamilyHFileWriteOptions] =
+                         new util.HashMap[Array[Byte], FamilyHFileWriteOptions](),
+                         compactionExclude: Boolean = false,
+                         maxSize:Long = HConstants.DEFAULT_MAX_FILE_SIZE):Unit = {
+      hc.bulkLoad(rdd, tableName,
+        flatMap, stagingDir, familyHFileWriteOptionsMap,
+        compactionExclude, maxSize)
+    }
+
+    /**
+     * Implicit method that gives easy access to HBaseContext's
+     * bulkLoadThinRows method.
+     *
+     * Spark Implementation of HBase Bulk load for short rows, somewhere under
+     * 1000 columns.  This bulk load should be faster for tables with thinner
+     * rows than the other Spark implementation of bulk load that puts only one
+     * value into a record going into a shuffle
+     *
+     * This will take the content from an existing RDD then sort and shuffle
+     * it with respect to region splits.  The result of that sort and shuffle
+     * will be written to HFiles.
+     *
+     * After this function is executed the user will have to call
+     * LoadIncrementalHFiles.doBulkLoad(...) to move the files into HBase
+     *
+     * In this implementation only the rowKey is given to the shuffle as the key
+     * and all the columns are already linked to the RowKey before the shuffle
+     * stage.  The sorting of the qualifier is done in memory outside of the
+     * shuffle stage
+     *
+     * @param tableName                      The HBase table we are loading into
+     * @param mapFunction                    A function that will convert the RDD records to
+     *                                       the key value format used for the shuffle to prep
+     *                                       for writing to the bulk loaded HFiles
+     * @param stagingDir                     The location on the FileSystem to bulk load into
+     * @param familyHFileWriteOptionsMap     Options that will define how the HFile for a
+     *                                       column family is written
+     * @param compactionExclude              Compaction excluded for the HFiles
+     * @param maxSize                        Max size for the HFiles before they roll
+     */
+    def hbaseBulkLoadThinRows(hc: HBaseContext,
+                      tableName: TableName,
+                      mapFunction: (T) =>
+                        (ByteArrayWrapper, FamiliesQualifiersValues),
+                      stagingDir:String,
+                      familyHFileWriteOptionsMap:
+                      util.Map[Array[Byte], FamilyHFileWriteOptions] =
+                      new util.HashMap[Array[Byte], FamilyHFileWriteOptions](),
+                      compactionExclude: Boolean = false,
+                      maxSize:Long = HConstants.DEFAULT_MAX_FILE_SIZE):Unit = {
+      hc.bulkLoadThinRows(rdd, tableName,
+        mapFunction, stagingDir, familyHFileWriteOptionsMap,
+        compactionExclude, maxSize)
+    }
+  }
+}
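
Finally, a hedged sketch of the implicit bulk load, assuming an HBaseContext hbaseContext, an RDD of (rowKey, value) byte-array pairs cellRdd, and assuming KeyFamilyQualifier (added elsewhere in this patch) takes (rowKey, family, qualifier); the table, family, qualifier, and staging path are illustrative:

    import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
    import org.apache.hadoop.hbase.spark.KeyFamilyQualifier
    import org.apache.hadoop.hbase.TableName
    import org.apache.hadoop.hbase.util.Bytes

    // cellRdd: RDD[(Array[Byte], Array[Byte])] (assumed), one cell per record,
    // all written to the illustrative family "c" and qualifier "q".
    cellRdd.hbaseBulkLoad(
      hbaseContext,
      TableName.valueOf("t1"),
      (kv: (Array[Byte], Array[Byte])) =>
        Iterator((new KeyFamilyQualifier(kv._1, Bytes.toBytes("c"), Bytes.toBytes("q")), kv._2)),
      "/tmp/t1-bulkload-staging")
    // Follow up with LoadIncrementalHFiles.doBulkLoad(...) to move the HFiles into the table.
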
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala
new file mode 100644 (file)
index 0000000..be6581a
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.util.Map
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.TableName
+import org.apache.hadoop.hbase.util.Pair
+import org.apache.yetus.audience.InterfaceAudience
+import org.apache.hadoop.hbase.client.{Connection, Delete, Get, Put, Result, Scan}
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
+import org.apache.spark.api.java.function.{FlatMapFunction, Function, VoidFunction}
+import org.apache.spark.streaming.api.java.JavaDStream
+
+import java.lang.Iterable
+
+import scala.collection.JavaConversions._
+import scala.reflect.ClassTag
+
+/**
+ * This is the Java Wrapper over HBaseContext which is written in
+ * Scala.  This class will be used by developers that want to
+ * work with Spark or Spark Streaming in Java
+ *
+ * @param jsc    This is the JavaSparkContext that we will wrap
+ * @param config This is the config information to our HBase cluster
+ */
+@InterfaceAudience.Public
+class JavaHBaseContext(@transient val jsc: JavaSparkContext,
+                       @transient val config: Configuration) extends Serializable {
+  val hbaseContext = new HBaseContext(jsc.sc, config)
+
+  /**
+   * A simple enrichment of the traditional Spark javaRdd foreachPartition.
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * @param javaRdd Original javaRdd with data to iterate over
+   * @param f       Function to be given an iterator to iterate through
+   *                the RDD values and a Connection object to interact
+   *                with HBase
+   */
+  def foreachPartition[T](javaRdd: JavaRDD[T],
+                          f: VoidFunction[(java.util.Iterator[T], Connection)]) = {
+
+    hbaseContext.foreachPartition(javaRdd.rdd,
+      (it: Iterator[T], conn: Connection) => {
+        f.call((it, conn))
+      })
+  }
+
+  /**
+   * A simple enrichment of the traditional Spark Streaming dStream foreach
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * @param javaDstream Original DStream with data to iterate over
+   * @param f           Function to be given an iterator to iterate through
+   *                    the JavaDStream values and a Connection object to
+   *                    interact with HBase
+   */
+  def foreachPartition[T](javaDstream: JavaDStream[T],
+                          f: VoidFunction[(Iterator[T], Connection)]) = {
+    hbaseContext.foreachPartition(javaDstream.dstream,
+      (it: Iterator[T], conn: Connection) => f.call(it, conn))
+  }
+
+  /**
+   * A simple enrichment of the traditional Spark JavaRDD mapPartition.
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * Note: Make sure to partition correctly to avoid memory issue when
+   * getting data from HBase
+   *
+   * @param javaRdd Original JavaRdd with data to iterate over
+   * @param f       Function to be given an iterator to iterate through
+   *                the RDD values and a Connection object to interact
+   *                with HBase
+   * @return        Returns a new RDD generated by the user defined
+   *                function just like normal mapPartition
+   */
+  def mapPartitions[T, R](javaRdd: JavaRDD[T],
+                          f: FlatMapFunction[(java.util.Iterator[T],
+                            Connection), R]): JavaRDD[R] = {
+    JavaRDD.fromRDD(hbaseContext.mapPartitions(javaRdd.rdd,
+      (it: Iterator[T], conn: Connection) =>
+        f.call(it, conn))(fakeClassTag[R]))(fakeClassTag[R])
+  }
+
+  /**
+   * A simple enrichment of the traditional Spark Streaming JavaDStream
+   * mapPartition.
+   *
+   * This function differs from the original in that it offers the
+   * developer access to an already connected Connection object
+   *
+   * Note: Do not close the Connection object.  All Connection
+   * management is handled outside this method
+   *
+   * Note: Make sure to partition correctly to avoid memory issue when
+   * getting data from HBase
+   *
+   * @param javaDstream Original JavaDStream with data to iterate over
+   * @param mp          Function to be given an iterator to iterate through
+   *                    the JavaDStream values and a Connection object to
+   *                    interact with HBase
+   * @return            Returns a new JavaDStream generated by the user
+   *                    defined function just like normal mapPartition
+   */
+  def streamMap[T, U](javaDstream: JavaDStream[T],
+                      mp: Function[(Iterator[T], Connection), Iterator[U]]):
+  JavaDStream[U] = {
+    JavaDStream.fromDStream(hbaseContext.streamMapPartitions(javaDstream.dstream,
+      (it: Iterator[T], conn: Connection) =>
+        mp.call(it, conn))(fakeClassTag[U]))(fakeClassTag[U])
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.foreachPartition method.
+   *
+   * It allows additional support for a user to take a JavaRDD
+   * and generate puts and send them to HBase.
+   * The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param javaRdd   Original JavaRDD with data to iterate over
+   * @param tableName The name of the table to put into
+   * @param f         Function to convert a value in the JavaRDD
+   *                  to a HBase Put
+   */
+  def bulkPut[T](javaRdd: JavaRDD[T],
+                 tableName: TableName,
+                 f: Function[(T), Put]) {
+
+    hbaseContext.bulkPut(javaRdd.rdd, tableName, (t: T) => f.call(t))
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.streamMapPartition method.
+   *
+   * It allows additional support for a user to take a JavaDStream and
+   * generate puts and send them to HBase.
+   *
+   * The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param javaDstream Original DStream with data to iterate over
+   * @param tableName   The name of the table to put into
+   * @param f           Function to convert a value in
+   *                    the JavaDStream to a HBase Put
+   */
+  def streamBulkPut[T](javaDstream: JavaDStream[T],
+                       tableName: TableName,
+                       f: Function[T, Put]) = {
+    hbaseContext.streamBulkPut(javaDstream.dstream,
+      tableName,
+      (t: T) => f.call(t))
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.foreachPartition method.
+   *
+   * It allows additional support for a user to take a JavaRDD and
+   * generate deletes and send them to HBase.
+   *
+   * The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param javaRdd   Original JavaRDD with data to iterate over
+   * @param tableName The name of the table to delete from
+   * @param f         Function to convert a value in the JavaRDD to a
+   *                  HBase Delete
+   * @param batchSize The number of deletes to batch before sending to HBase
+   */
+  def bulkDelete[T](javaRdd: JavaRDD[T], tableName: TableName,
+                    f: Function[T, Delete], batchSize: Integer) {
+    hbaseContext.bulkDelete(javaRdd.rdd, tableName, (t: T) => f.call(t), batchSize)
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.streamBulkMutation method.
+   *
+   * It allows additional support for a user to take a JavaDStream and
+   * generate Deletes and send them to HBase.
+   *
+   * The complexity of managing the Connection is
+   * removed from the developer
+   *
+   * @param javaDStream Original DStream with data to iterate over
+   * @param tableName   The name of the table to delete from
+   * @param f           Function to convert a value in the JavaDStream to a
+   *                    HBase Delete
+   * @param batchSize   The number of deletes to be sent at once
+   */
+  def streamBulkDelete[T](javaDStream: JavaDStream[T],
+                          tableName: TableName,
+                          f: Function[T, Delete],
+                          batchSize: Integer) = {
+    hbaseContext.streamBulkDelete(javaDStream.dstream, tableName,
+      (t: T) => f.call(t),
+      batchSize)
+  }
+
+  /**
+   * A simple abstraction over the HBaseContext.mapPartition method.
+   *
+   * It allows additional support for a user to take a JavaRDD and generate a
+   * new RDD based on Gets and the results they bring back from HBase
+   *
+   * @param tableName     The name of the table to get from
+   * @param batchSize     batch size of how many gets to retrieve in a single fetch
+   * @param javaRdd       Original JavaRDD with data to iterate over
+   * @param makeGet       Function to convert a value in the JavaRDD to a
+   *                      HBase Get
+   * @param convertResult This will convert the HBase Result object to
+   *                      what ever the user wants to put in the resulting
+   *                      JavaRDD
+   * @return              New JavaRDD that is created by the Get to HBase
+   */
+  def bulkGet[T, U](tableName: TableName,
+                    batchSize: Integer,
+                    javaRdd: JavaRDD[T],
+                    makeGet: Function[T, Get],
+                    convertResult: Function[Result, U]): JavaRDD[U] = {
+
+    JavaRDD.fromRDD(hbaseContext.bulkGet[T, U](tableName,
+      batchSize,
+      javaRdd.rdd,
+      (t: T) => makeGet.call(t),
+      (r: Result) => {
+        convertResult.call(r)
+      })(fakeClassTag[U]))(fakeClassTag[U])
+
+  }
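+
+  // A minimal usage sketch (illustrative names, reusing `hc` and `rdd` from above): each RDD
+  // element becomes a Get, and each Result is converted to a String for the resulting RDD:
+  //
+  //   val values: JavaRDD[String] = hc.bulkGet[Array[Byte], String](
+  //     TableName.valueOf("t1"), 2, rdd,
+  //     new Function[Array[Byte], Get] {
+  //       override def call(row: Array[Byte]): Get = new Get(row)
+  //     },
+  //     new Function[Result, String] {
+  //       override def call(r: Result): String = Bytes.toString(r.getRow)
+  //     })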
+
+  /**
+   * A simple abstraction over the HBaseContext.streamMap method.
+   *
+   * It allows a user to take a DStream and generate a new DStream
+   * based on Gets and the results they bring back from HBase.
+   *
+   * @param tableName     The name of the table to get from
+   * @param batchSize     The number of Gets to be batched together
+   * @param javaDStream   Original DStream with data to iterate over
+   * @param makeGet       Function to convert a value in the JavaDStream to an
+   *                      HBase Get
+   * @param convertResult This will convert the HBase Result object to
+   *                      whatever the user wants to put in the resulting
+   *                      JavaDStream
+   * @return              New JavaDStream that is created by the Get to HBase
+   */
+  def streamBulkGet[T, U](tableName: TableName,
+                          batchSize: Integer,
+                          javaDStream: JavaDStream[T],
+                          makeGet: Function[T, Get],
+                          convertResult: Function[Result, U]): JavaDStream[U] = {
+    JavaDStream.fromDStream(hbaseContext.streamBulkGet(tableName,
+      batchSize,
+      javaDStream.dstream,
+      (t: T) => makeGet.call(t),
+      (r: Result) => convertResult.call(r))(fakeClassTag[U]))(fakeClassTag[U])
+  }
+
+  /**
+    * A simple abstraction over the HBaseContext.bulkLoad method.
+    * It allows a user to take a JavaRDD, convert it into a new
+    * JavaRDD[Pair] with the given map function, and generate HFiles
+    * in stagingDir for bulk loading.
+    *
+    * @param javaRdd                        The JavaRDD we are bulk loading from
+    * @param tableName                      The HBase table we are loading into
+    * @param mapFunc                        A Function that will convert a value in the JavaRDD
+    *                                       to a Pair(KeyFamilyQualifier, Array[Byte])
+    * @param stagingDir                     The location on the FileSystem to bulk load into
+    * @param familyHFileWriteOptionsMap     Options that will define how the HFile for a
+    *                                       column family is written
+    * @param compactionExclude              Whether the generated HFiles should be excluded
+    *                                       from minor compaction
+    * @param maxSize                        Max size for the HFiles before they roll
+    */
+  def bulkLoad[T](javaRdd: JavaRDD[T],
+                  tableName: TableName,
+                  mapFunc : Function[T, Pair[KeyFamilyQualifier, Array[Byte]]],
+                  stagingDir: String,
+                  familyHFileWriteOptionsMap: Map[Array[Byte], FamilyHFileWriteOptions],
+                  compactionExclude: Boolean,
+                  maxSize: Long):
+  Unit = {
+    hbaseContext.bulkLoad[Pair[KeyFamilyQualifier, Array[Byte]]](javaRdd.map(mapFunc).rdd, tableName, t => {
+      val keyFamilyQualifier = t.getFirst
+      val value = t.getSecond
+      Seq((keyFamilyQualifier, value)).iterator
+    }, stagingDir, familyHFileWriteOptionsMap, compactionExclude, maxSize)
+  }
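+
+  // A minimal usage sketch (illustrative names, reusing `hc` and `rdd` from above): each RDD
+  // element is mapped to a Pair(KeyFamilyQualifier, value); HFiles are written under the given
+  // staging directory and can then be handed to HBase's bulk-load (completebulkload) tooling.
+  // `familyOptions` stands in for an empty per-family write-options map:
+  //
+  //   hc.bulkLoad(rdd, TableName.valueOf("t1"),
+  //     new Function[Array[Byte], Pair[KeyFamilyQualifier, Array[Byte]]] {
+  //       override def call(row: Array[Byte]): Pair[KeyFamilyQualifier, Array[Byte]] =
+  //         new Pair(new KeyFamilyQualifier(row, Bytes.toBytes("cf"), Bytes.toBytes("q")),
+  //           Bytes.toBytes("v"))
+  //     },
+  //     "/tmp/hbase-staging", familyOptions, false, 1024L * 1024 * 1024)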
+
+  /**
+    * A simple abstraction over the HBaseContext.bulkLoadThinRows method.
+    * It allows a user to take a JavaRDD, convert it into a new
+    * JavaRDD[Pair] with the given map function, and generate HFiles
+    * in stagingDir for bulk loading.
+    *
+    * @param javaRdd                        The JavaRDD we are bulk loading from
+    * @param tableName                      The HBase table we are loading into
+    * @param mapFunc                        A Function that will convert a value in the JavaRDD
+    *                                       to a Pair(ByteArrayWrapper, FamiliesQualifiersValues)
+    * @param stagingDir                     The location on the FileSystem to bulk load into
+    * @param familyHFileWriteOptionsMap     Options that will define how the HFile for a
+    *                                       column family is written
+    * @param compactionExclude              Whether the generated HFiles should be excluded
+    *                                       from minor compaction
+    * @param maxSize                        Max size for the HFiles before they roll
+    */
+  def bulkLoadThinRows[T](javaRdd: JavaRDD[T],
+                       tableName: TableName,
+                       mapFunc : Function[T, Pair[ByteArrayWrapper, FamiliesQualifiersValues]],
+                       stagingDir: String,
+                       familyHFileWriteOptionsMap: Map[Array[Byte], FamilyHFileWriteOptions],
+                       compactionExclude: Boolean,
+                       maxSize: Long):
+  Unit = {
+    hbaseContext.bulkLoadThinRows[Pair[ByteArrayWrapper, FamiliesQualifiersValues]](javaRdd.map(mapFunc).rdd,
+      tableName, t => {
+      (t.getFirst, t.getSecond)
+    }, stagingDir, familyHFileWriteOptionsMap, compactionExclude, maxSize)
+  }
+
+  /**
+   * This function will use the native HBase TableInputFormat with the
+   * given scan object to generate a new JavaRDD
+   *
+   * @param tableName The name of the table to scan
+   * @param scans     The HBase scan object to use to read data from HBase
+   * @param f         Function to convert a Result object from HBase into
+   *                  what the user wants in the final generated JavaRDD
+   * @return          New JavaRDD with results from scan
+   */
+  def hbaseRDD[U](tableName: TableName,
+                  scans: Scan,
+                  f: Function[(ImmutableBytesWritable, Result), U]):
+  JavaRDD[U] = {
+    JavaRDD.fromRDD(
+      hbaseContext.hbaseRDD[U](tableName,
+        scans,
+        (v: (ImmutableBytesWritable, Result)) =>
+          f.call(v._1, v._2))(fakeClassTag[U]))(fakeClassTag[U])
+  }
+
+  /**
+   * An overloaded version of HBaseContext hbaseRDD that defines the
+   * type of the resulting JavaRDD.
+   *
+   * @param tableName The name of the table to scan
+   * @param scans     The HBase scan object to use to read data from HBase
+   * @return          New JavaRDD with results from scan
+   */
+  def hbaseRDD(tableName: TableName,
+               scans: Scan):
+  JavaRDD[(ImmutableBytesWritable, Result)] = {
+    JavaRDD.fromRDD(hbaseContext.hbaseRDD(tableName, scans))
+  }
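+
+  // A minimal usage sketch (illustrative names, reusing `hc` from above): scan column family
+  // "cf" of table "t1" and keep only the row keys as Strings:
+  //
+  //   val scan = new Scan().addFamily(Bytes.toBytes("cf"))
+  //   val rowKeys: JavaRDD[String] = hc.hbaseRDD[String](TableName.valueOf("t1"), scan,
+  //     new Function[(ImmutableBytesWritable, Result), String] {
+  //       override def call(v: (ImmutableBytesWritable, Result)): String =
+  //         Bytes.toString(v._2.getRow)
+  //     })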
+
+  /**
+   * Produces a ClassTag[T], which is actually just a casted ClassTag[AnyRef].
+   *
+   * This method is used to keep ClassTags out of the external Java API, as the Java compiler
+   * cannot produce them automatically. While this ClassTag-faking does please the compiler,
+   * it can cause problems at runtime if the Scala API relies on ClassTags for correctness.
+   *
+   * Often, though, a ClassTag[AnyRef] will not lead to incorrect behavior,
+   * just worse performance or security issues. For instance, an Array[AnyRef]
+   * can hold any type T, but may lose primitive specialization.
+   */
+  private[spark]
+  def fakeClassTag[T]: ClassTag[T] = ClassTag.AnyRef.asInstanceOf[ClassTag[T]]
+
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/KeyFamilyQualifier.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/KeyFamilyQualifier.scala
new file mode 100644 (file)
index 0000000..7fd5a62
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import java.io.Serializable
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Bytes
+
+/**
+ * This is the key to be used for sorting and shuffling.
+ *
+ * We will only partition on the rowKey, but we will sort on all three.
+ *
+ * @param rowKey    Record RowKey
+ * @param family    Record ColumnFamily
+ * @param qualifier Cell Qualifier
+ */
+@InterfaceAudience.Public
+class KeyFamilyQualifier(val rowKey:Array[Byte], val family:Array[Byte], val qualifier:Array[Byte])
+  extends Comparable[KeyFamilyQualifier] with Serializable {
+  override def compareTo(o: KeyFamilyQualifier): Int = {
+    var result = Bytes.compareTo(rowKey, o.rowKey)
+    if (result == 0) {
+      result = Bytes.compareTo(family, o.family)
+      if (result == 0) result = Bytes.compareTo(qualifier, o.qualifier)
+    }
+    result
+  }
+  override def toString: String = {
+    Bytes.toString(rowKey) + ":" + Bytes.toString(family) + ":" + Bytes.toString(qualifier)
+  }
+}
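+
+// A small illustration of the ordering (hypothetical values): instances compare first by rowKey,
+// then by family, then by qualifier, so
+//   new KeyFamilyQualifier(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("a"))
+// sorts before
+//   new KeyFamilyQualifier(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("b"))
+// because the row keys and families are equal and "a" compares below "b" byte-wise.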
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
new file mode 100644 (file)
index 0000000..a92f4e0
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import org.apache.yetus.audience.InterfaceAudience
+import org.slf4j.impl.StaticLoggerBinder
+import org.slf4j.Logger
+import org.slf4j.LoggerFactory
+
+/**
+ * Utility trait for classes that want to log data. Creates an SLF4J logger for the class and allows
+ * logging messages at different levels using methods that only evaluate parameters lazily if the
+ * log level is enabled.
+ *
+ * Spark's own Logging trait became private in Spark 2.0; this copy exists to isolate
+ * incompatibilities across Spark releases.
+ */
+@InterfaceAudience.Private
+trait Logging {
+
+  // Make the log field transient so that objects with Logging can
+  // be serialized and used on another machine
+  @transient private var log_ : Logger = null
+
+  // Method to get the logger name for this object
+  protected def logName = {
+    // Ignore trailing $'s in the class names for Scala objects
+    this.getClass.getName.stripSuffix("$")
+  }
+
+  // Method to get or create the logger for this object
+  protected def log: Logger = {
+    if (log_ == null) {
+      initializeLogIfNecessary(false)
+      log_ = LoggerFactory.getLogger(logName)
+    }
+    log_
+  }
+
+  // Log methods that take only a String
+  protected def logInfo(msg: => String) {
+    if (log.isInfoEnabled) log.info(msg)
+  }
+
+  protected def logDebug(msg: => String) {
+    if (log.isDebugEnabled) log.debug(msg)
+  }
+
+  protected def logTrace(msg: => String) {
+    if (log.isTraceEnabled) log.trace(msg)
+  }
+
+  protected def logWarning(msg: => String) {
+    if (log.isWarnEnabled) log.warn(msg)
+  }
+
+  protected def logError(msg: => String) {
+    if (log.isErrorEnabled) log.error(msg)
+  }
+
+  // Log methods that take Throwables (Exceptions/Errors) too
+  protected def logInfo(msg: => String, throwable: Throwable) {
+    if (log.isInfoEnabled) log.info(msg, throwable)
+  }
+
+  protected def logDebug(msg: => String, throwable: Throwable) {
+    if (log.isDebugEnabled) log.debug(msg, throwable)
+  }
+
+  protected def logTrace(msg: => String, throwable: Throwable) {
+    if (log.isTraceEnabled) log.trace(msg, throwable)
+  }
+
+  protected def logWarning(msg: => String, throwable: Throwable) {
+    if (log.isWarnEnabled) log.warn(msg, throwable)
+  }
+
+  protected def logError(msg: => String, throwable: Throwable) {
+    if (log.isErrorEnabled) log.error(msg, throwable)
+  }
+
+  protected def initializeLogIfNecessary(isInterpreter: Boolean): Unit = {
+    if (!Logging.initialized) {
+      Logging.initLock.synchronized {
+        if (!Logging.initialized) {
+          initializeLogging(isInterpreter)
+        }
+      }
+    }
+  }
+
+  private def initializeLogging(isInterpreter: Boolean): Unit = {
+    // Don't use a logger in here, as this is itself occurring during initialization of a logger.
+    // Touching the SLF4J StaticLoggerBinder below forces the underlying logging backend to resolve.
+    val binderClass = StaticLoggerBinder.getSingleton.getLoggerFactoryClassStr
+    Logging.initialized = true
+
+    // Force a call into slf4j to initialize it. Avoids this happening from multiple threads
+    // and triggering this: http://mailman.qos.ch/pipermail/slf4j-dev/2010-April/002956.html
+    log
+  }
+}
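+
+// A minimal usage sketch (hypothetical class): mixing in the trait gives log calls whose
+// by-name message arguments are only built when the corresponding level is enabled:
+//
+//   class MyHBaseRelation extends Logging {
+//     def doWork(rows: Seq[Int]): Unit = {
+//       logDebug(s"processing ${rows.size} rows")  // the string is built only if DEBUG is on
+//     }
+//   }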
+
+private object Logging {
+  @volatile private var initialized = false
+  val initLock = new Object()
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/NewHBaseRDD.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/NewHBaseRDD.scala
new file mode 100644 (file)
index 0000000..7088ce9
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.mapreduce.InputFormat
+import org.apache.spark.rdd.NewHadoopRDD
+import org.apache.spark.{InterruptibleIterator, Partition, SparkContext, TaskContext}
+
+@InterfaceAudience.Public
+class NewHBaseRDD[K,V](@transient val sc : SparkContext,
+                       @transient val inputFormatClass: Class[_ <: InputFormat[K, V]],
+                       @transient val keyClass: Class[K],
+                       @transient val valueClass: Class[V],
+                       @transient private val __conf: Configuration,
+                       val hBaseContext: HBaseContext) extends NewHadoopRDD(sc, inputFormatClass, keyClass, valueClass, __conf) {
+
+  override def compute(theSplit: Partition, context: TaskContext): InterruptibleIterator[(K, V)] = {
+    hBaseContext.applyCreds()
+    super.compute(theSplit, context)
+  }
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/Bound.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/Bound.scala
new file mode 100644 (file)
index 0000000..4602ac8
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark.datasources
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.spark.hbase._
+
+/**
+ * The Bound represents the boundary for the scan.
+ *
+ * @param b The byte array of the bound
+ * @param inc inclusive or not.
+ */
+@InterfaceAudience.Private
+case class Bound(b: Array[Byte], inc: Boolean)
+// The non-overlapping ranges we need to scan; if lower equals upper, it is a Get request.
+
+@InterfaceAudience.Private
+case class Range(lower: Option[Bound], upper: Option[Bound])
+
+@InterfaceAudience.Private
+object Range {
+  def apply(region: HBaseRegion): Range = {
+    Range(region.start.map(Bound(_, true)), if (region.end.get.size == 0) {
+      None
+    } else {
+      region.end.map((Bound(_, false)))
+    })
+  }
+}
+
+@InterfaceAudience.Private
+object Ranges {
+  // We assume that
+  // 1. r.lower.inc is true, and r.upper.inc is false
+  // 2. for each range in rs, its upper.inc is false
+  def and(r: Range, rs: Seq[Range]): Seq[Range] = {
+    rs.flatMap{ s =>
+      val lower = s.lower.map { x =>
+        // the scan has lower bound
+        r.lower.map { y =>
+          // the region has lower bound
+          if (ord.compare(x.b, y.b) < 0) {
+            // scan lower bound is smaller than region server lower bound
+            Some(y)
+          } else {
+            // scan lower bound is greater than or equal to region server lower bound
+            Some(x)
+          }
+        }.getOrElse(Some(x))
+      }.getOrElse(r.lower)
+
+      val upper =  s.upper.map { x =>
+        // the scan has upper bound
+        r.upper.map { y =>
+          // the region has upper bound
+          if (ord.compare(x.b, y.b) >= 0) {
+            // scan upper bound is larger than server upper bound
+            // but region server scan stop is exclusive. It is OK here.
+            Some(y)
+          } else {
+            // scan upper bound is less than or equal to region server upper bound
+            Some(x)
+          }
+        }.getOrElse(Some(x))
+      }.getOrElse(r.upper)
+
+      val c = lower.map { case x =>
+        upper.map { case y =>
+          ord.compare(x.b, y.b)
+        }.getOrElse(-1)
+      }.getOrElse(-1)
+      if (c < 0) {
+        Some(Range(lower, upper))
+      } else {
+        None
+      }
+    }.seq
+  }
+}
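+
+// A small worked example (hypothetical byte-array bounds written here as strings): intersecting
+// the region range ["b", "f") with a single scan range ["a", "d") yields ["b", "d"):
+//
+//   val region = Range(Some(Bound(Bytes.toBytes("b"), true)), Some(Bound(Bytes.toBytes("f"), false)))
+//   val scans  = Seq(Range(Some(Bound(Bytes.toBytes("a"), true)), Some(Bound(Bytes.toBytes("d"), false))))
+//   Ranges.and(region, scans)  // one Range from "b" (inclusive) to "d" (exclusive)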
+
+@InterfaceAudience.Private
+object Points {
+  def and(r: Range, ps: Seq[Array[Byte]]): Seq[Array[Byte]] = {
+    ps.flatMap { p =>
+      if (ord.compare(r.lower.get.b, p) <= 0) {
+        // if the region lower bound is less than or equal to the point
+        if (r.upper.isDefined) {
+          // if region upper bound is defined
+          if (ord.compare(r.upper.get.b, p) > 0) {
+            // if the upper bound is greater than the point (because upper bound is exclusive)
+            Some(p)
+          } else {
+            None
+          }
+        } else {
+          // if the region upper bound is not defined (infinity)
+          Some(p)
+        }
+      } else {
+        None
+      }
+    }
+  }
+}
+
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/DataTypeParserWrapper.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/DataTypeParserWrapper.scala
new file mode 100644 (file)
index 0000000..c0ccc92
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark.datasources
+
+import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
+import org.apache.spark.sql.types.DataType
+import org.apache.yetus.audience.InterfaceAudience
+
+@InterfaceAudience.Private
+trait DataTypeParser {
+  def parse(dataTypeString: String): DataType
+}
+
+@InterfaceAudience.Private
+object DataTypeParserWrapper extends DataTypeParser{
+  def parse(dataTypeString: String): DataType = CatalystSqlParser.parseDataType(dataTypeString)
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseResources.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseResources.scala
new file mode 100644 (file)
index 0000000..0f467a7
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark.datasources
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.TableName
+import org.apache.hadoop.hbase.client._
+import org.apache.hadoop.hbase.spark.{HBaseConnectionKey, SmartConnection,
+  HBaseConnectionCache, HBaseRelation}
+import scala.language.implicitConversions
+
+// Resource and ReferencedResources are defined for extensibility,
+// e.g., consolidate scan and bulkGet in the future work.
+
+// User has to invoke release explicitly to release the resource,
+// and potentially parent resources
+@InterfaceAudience.Private
+trait Resource {
+  def release(): Unit
+}
+
+@InterfaceAudience.Private
+case class ScanResource(tbr: TableResource, rs: ResultScanner) extends Resource {
+  def release() {
+    rs.close()
+    tbr.release()
+  }
+}
+
+@InterfaceAudience.Private
+case class GetResource(tbr: TableResource, rs: Array[Result]) extends Resource {
+  def release() {
+    tbr.release()
+  }
+}
+
+@InterfaceAudience.Private
+trait ReferencedResource {
+  var count: Int = 0
+  def init(): Unit
+  def destroy(): Unit
+  def acquire() = synchronized {
+    try {
+      count += 1
+      if (count == 1) {
+        init()
+      }
+    } catch {
+      case e: Throwable =>
+        release()
+        throw e
+    }
+  }
+
+  def release() = synchronized {
+    count -= 1
+    if (count == 0) {
+      destroy()
+    }
+  }
+
+  def releaseOnException[T](func: => T): T = {
+    acquire()
+    val ret = {
+      try {
+        func
+      } catch {
+        case e: Throwable =>
+          release()
+          throw e
+      }
+    }
+    ret
+  }
+}
+
+@InterfaceAudience.Private
+case class TableResource(relation: HBaseRelation) extends ReferencedResource {
+  var connection: SmartConnection = _
+  var table: Table = _
+
+  override def init(): Unit = {
+    connection = HBaseConnectionCache.getConnection(relation.hbaseConf)
+    table = connection.getTable(TableName.valueOf(relation.tableName))
+  }
+
+  override def destroy(): Unit = {
+    if (table != null) {
+      table.close()
+      table = null
+    }
+    if (connection != null) {
+      connection.close()
+      connection = null
+    }
+  }
+
+  def getScanner(scan: Scan): ScanResource = releaseOnException {
+    ScanResource(this, table.getScanner(scan))
+  }
+
+  def get(list: java.util.List[org.apache.hadoop.hbase.client.Get]) = releaseOnException {
+    GetResource(this, table.get(list))
+  }
+}
+
+@InterfaceAudience.Private
+case class RegionResource(relation: HBaseRelation) extends ReferencedResource {
+  var connection: SmartConnection = _
+  var rl: RegionLocator = _
+  val regions = releaseOnException {
+    val keys = rl.getStartEndKeys
+    keys.getFirst.zip(keys.getSecond)
+      .zipWithIndex
+      .map(x =>
+      HBaseRegion(x._2,
+        Some(x._1._1),
+        Some(x._1._2),
+        Some(rl.getRegionLocation(x._1._1).getHostname)))
+  }
+
+  override def init(): Unit = {
+    connection = HBaseConnectionCache.getConnection(relation.hbaseConf)
+    rl = connection.getRegionLocator(TableName.valueOf(relation.tableName))
+  }
+
+  override def destroy(): Unit = {
+    if (rl != null) {
+      rl.close()
+      rl = null
+    }
+    if (connection != null) {
+      connection.close()
+      connection = null
+    }
+  }
+}
+
+@InterfaceAudience.Private
+object HBaseResources{
+  implicit def ScanResToScan(sr: ScanResource): ResultScanner = {
+    sr.rs
+  }
+
+  implicit def GetResToResult(gr: GetResource): Array[Result] = {
+    gr.rs
+  }
+
+  implicit def TableResToTable(tr: TableResource): Table = {
+    tr.table
+  }
+
+  implicit def RegionResToRegions(rr: RegionResource): Seq[HBaseRegion] = {
+    rr.regions
+  }
+}
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
new file mode 100644 (file)
index 0000000..dc497f9
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark.datasources
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * These are the HBase-Spark configuration options. The user can either set them in SparkConf,
+ * where they take effect globally, or configure them per table, which overrides the values
+ * set in SparkConf. If an option is not set, its default value takes effect.
+ */
+@InterfaceAudience.Public
+object HBaseSparkConf{
+  /** Set to false to disable server-side caching of blocks for this scan;
+   *  false by default, since full table scans generate too much block cache churn.
+   */
+  val QUERY_CACHEBLOCKS = "hbase.spark.query.cacheblocks"
+  val DEFAULT_QUERY_CACHEBLOCKS = false
+  /** The number of rows for caching that will be passed to scan. */
+  val QUERY_CACHEDROWS = "hbase.spark.query.cachedrows"
+  /** Set the maximum number of values to return for each call to next() in scan. */
+  val QUERY_BATCHSIZE = "hbase.spark.query.batchsize"
+  /** The number of Gets sent to HBase in a single bulk-get batch. */
+  val BULKGET_SIZE = "hbase.spark.bulkget.size"
+  val DEFAULT_BULKGET_SIZE = 1000
+  /** Set to specify the location of the hbase configuration file. */
+  val HBASE_CONFIG_LOCATION = "hbase.spark.config.location"
+  /** Set to specify whether to create a new HBaseContext or use the latest cached one. */
+  val USE_HBASECONTEXT = "hbase.spark.use.hbasecontext"
+  val DEFAULT_USE_HBASECONTEXT = true
+  /** Push filters down to the data source to increase query performance. */
+  val PUSHDOWN_COLUMN_FILTER = "hbase.spark.pushdown.columnfilter"
+  val DEFAULT_PUSHDOWN_COLUMN_FILTER = true
+  /** Class name of the encoder, which encodes Spark data types to HBase bytes. */
+  val QUERY_ENCODER = "hbase.spark.query.encoder"
+  val DEFAULT_QUERY_ENCODER = classOf[NaiveEncoder].getCanonicalName
+  /** The timestamp used to filter columns with a specific timestamp. */
+  val TIMESTAMP = "hbase.spark.query.timestamp"
+  /** The starting timestamp used to filter columns with a specific range of versions. */
+  val TIMERANGE_START = "hbase.spark.query.timerange.start"
+  /** The ending timestamp used to filter columns with a specific range of versions. */
+  val TIMERANGE_END =  "hbase.spark.query.timerange.end"
+  /** The maximum number of versions to return. */
+  val MAX_VERSIONS = "hbase.spark.query.maxVersions"
+  /** Delay, in milliseconds, before closing an hbase-spark connection that no longer has any references. */
+  val DEFAULT_CONNECTION_CLOSE_DELAY = 10 * 60 * 1000
+}
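+
+// A minimal usage sketch (hypothetical SparkSession `spark` and catalog JSON string `cat`):
+// per-table options passed to the reader override values configured globally in SparkConf:
+//
+//   val df = spark.read
+//     .options(Map(
+//       HBaseTableCatalog.tableCatalog -> cat,
+//       HBaseSparkConf.QUERY_CACHEBLOCKS -> "true",
+//       HBaseSparkConf.BULKGET_SIZE -> "500",
+//       HBaseSparkConf.MAX_VERSIONS -> "3"))
+//     .format("org.apache.hadoop.hbase.spark")
+//     .load()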
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableCatalog.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableCatalog.scala
new file mode 100644 (file)
index 0000000..d2a8a3e
--- /dev/null
@@ -0,0 +1,372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark.datasources
+
+import org.apache.avro.Schema
+import org.apache.yetus.audience.InterfaceAudience
+import org.apache.hadoop.hbase.spark.{Logging, SchemaConverters}
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.spark.sql.types._
+import org.json4s.jackson.JsonMethods._
+
+import scala.collection.mutable
+
+// The definition of each column cell, which may be a composite type
+// TODO: add avro support
+@InterfaceAudience.Private
+case class Field(
+    colName: String,
+    cf: String,
+    col: String,
+    sType: Option[String] = None,
+    avroSchema: Option[String] = None,
+    serdes: Option[SerDes]= None,
+    len: Int = -1) extends Logging {
+  override def toString = s"$colName $cf $col"
+  val isRowKey = cf == HBaseTableCatalog.rowKey
+  var start: Int = _
+  def schema: Option[Schema] = avroSchema.map { x =>
+    logDebug(s"avro: $x")
+    val p = new Schema.Parser
+    p.parse(x)
+  }
+
+  lazy val exeSchema = schema
+
+  // converter from avro to catalyst structure
+  lazy val avroToCatalyst: Option[Any => Any] = {
+    schema.map(SchemaConverters.createConverterToSQL(_))
+  }
+
+  // converter from catalyst to avro
+  lazy val catalystToAvro: (Any) => Any ={
+    SchemaConverters.createConverterToAvro(dt, colName, "recordNamespace")
+  }
+
+  def cfBytes: Array[Byte] = {
+    if (isRowKey) {
+      Bytes.toBytes("")
+    } else {
+      Bytes.toBytes(cf)
+    }
+  }
+  def colBytes: Array[Byte] = {
+    if (isRowKey) {
+      Bytes.toBytes("key")
+    } else {
+      Bytes.toBytes(col)
+    }
+  }
+
+  val dt = {
+    sType.map(DataTypeParserWrapper.parse(_)).getOrElse{
+      schema.map{ x=>
+        SchemaConverters.toSqlType(x).dataType
+      }.get
+    }
+  }
+
+  var length: Int = {
+    if (len == -1) {
+      dt match {
+        case BinaryType | StringType => -1
+        case BooleanType => Bytes.SIZEOF_BOOLEAN
+        case ByteType => 1
+        case DoubleType => Bytes.SIZEOF_DOUBLE
+        case FloatType => Bytes.SIZEOF_FLOAT
+        case IntegerType => Bytes.SIZEOF_INT
+        case LongType => Bytes.SIZEOF_LONG
+        case ShortType => Bytes.SIZEOF_SHORT
+        case _ => -1
+      }
+    } else {
+      len
+    }
+
+  }
+
+  override def equals(other: Any): Boolean = other match {
+    case that: Field =>
+      colName == that.colName && cf == that.cf && col == that.col
+    case _ => false
+  }
+}
+
+// The row key definition, with each key referring to a col defined in Field, e.g.,
+// key1:key2:key3
+@InterfaceAudience.Private
+case class RowKey(k: String) {
+  val keys = k.split(":")
+  var fields: Seq[Field] = _
+  var varLength = false
+  def length = {
+    if (varLength) {
+      -1
+    } else {
+      fields.foldLeft(0){case (x, y) =>
+        x + y.length
+      }
+    }
+  }
+}
+// The map between the columns presented to Spark and the HBase fields
+@InterfaceAudience.Private
+case class SchemaMap(map: mutable.HashMap[String, Field]) {
+  def toFields = map.map { case (name, field) =>
+    StructField(name, field.dt)
+  }.toSeq
+
+  def fields = map.values
+
+  def getField(name: String) = map(name)
+}
+
+
+// The definition of the HBase relation schema
+@InterfaceAudience.Private
+case class HBaseTableCatalog(
+     namespace: String,
+     name: String,
+     row: RowKey,
+     sMap: SchemaMap,
+     @transient params: Map[String, String]) extends Logging {
+  def toDataType = StructType(sMap.toFields)
+  def getField(name: String) = sMap.getField(name)
+  def getRowKey: Seq[Field] = row.fields
+  def getPrimaryKey= row.keys(0)
+  def getColumnFamilies = {
+    sMap.fields.map(_.cf).filter(_ != HBaseTableCatalog.rowKey).toSeq.distinct
+  }
+
+  def get(key: String) = params.get(key)
+
+  // Setup the start and length for each dimension of row key at runtime.
+  def dynSetupRowKey(rowKey: Array[Byte]) {
+    logDebug(s"length: ${rowKey.length}")
+    if(row.varLength) {
+      var start = 0
+      row.fields.foreach { f =>
+        logDebug(s"start: $start")
+        f.start = start
+        f.length = {
+          // If the length is not defined
+          if (f.length == -1) {
+            f.dt match {
+              case StringType =>
+                var pos = rowKey.indexOf(HBaseTableCatalog.delimiter, start)
+                if (pos == -1 || pos > rowKey.length) {
+                  // this is at the last dimension
+                  pos = rowKey.length
+                }
+                pos - start
+              // We don't know the length, assume it extend to the end of the rowkey.
+              case _ => rowKey.length - start
+            }
+          } else {
+            f.length
+          }
+        }
+        start += f.length
+      }
+    }
+  }
+
+  def initRowKey = {
+    val fields = sMap.fields.filter(_.cf == HBaseTableCatalog.rowKey)
+    row.fields = row.keys.flatMap(n => fields.find(_.col == n))
+    // The length is determined at run time if it is string or binary and the length is undefined.
+    if (row.fields.filter(_.length == -1).isEmpty) {
+      var start = 0
+      row.fields.foreach { f =>
+        f.start = start
+        start += f.length
+      }
+    } else {
+      row.varLength = true
+    }
+  }
+  initRowKey
+}
+
+@InterfaceAudience.Public
+object HBaseTableCatalog {
+  // If defined and larger than 3, a new table will be created with the number of regions specified.
+  val newTable = "newtable"
+  val regionStart = "regionStart"
+  val defaultRegionStart = "aaaaaaa"
+  val regionEnd = "regionEnd"
+  val defaultRegionEnd = "zzzzzzz"
+  // The json string specifying hbase catalog information
+  val tableCatalog = "catalog"
+  // The row key with format key1:key2 specifying table row key
+  val rowKey = "rowkey"
+  // The key for the hbase table whose value specifies the namespace and table name
+  val table = "table"
+  // The namespace of hbase table
+  val nameSpace = "namespace"
+  // The name of hbase table
+  val tableName = "name"
+  // The name of columns in hbase catalog
+  val columns = "columns"
+  val cf = "cf"
+  val col = "col"
+  val `type` = "type"
+  // the name of avro schema json string
+  val avro = "avro"
+  val delimiter: Byte = 0
+  val serdes = "serdes"
+  val length = "length"
+
+  /**
+    * User-provided table schema definition, for example:
+    * {"table":{"namespace":"default", "name":"tableName"}, "rowkey":"key1:key2",
+    * "columns":{"col1":{"cf":"cf1", "col":"col1", "type":"type1"},
+    * "col2":{"cf":"cf2", "col":"col2", "type":"type2"}}}
+    * Note that for every col in the rowkey there has to be one corresponding col defined in columns.
+    */
+  def apply(params: Map[String, String]): HBaseTableCatalog = {
+    val parameters = convert(params)
+    val jString = parameters(tableCatalog)
+    val map = parse(jString).values.asInstanceOf[Map[String, _]]
+    val tableMeta = map.get(table).get.asInstanceOf[Map[String, _]]
+    val nSpace = tableMeta.get(nameSpace).getOrElse("default").asInstanceOf[String]
+    val tName = tableMeta.get(tableName).get.asInstanceOf[String]
+    val cIter = map.get(columns).get.asInstanceOf[Map[String, Map[String, String]]].toIterator
+    val schemaMap = mutable.HashMap.empty[String, Field]
+    cIter.foreach { case (name, column) =>
+      val sd = {
+        column.get(serdes).asInstanceOf[Option[String]].map(n =>
+          Class.forName(n).newInstance().asInstanceOf[SerDes]
+        )
+      }
+      val len = column.get(length).map(_.toInt).getOrElse(-1)
+      val sAvro = column.get(avro).map(parameters(_))
+      val f = Field(name, column.getOrElse(cf, rowKey),
+        column.get(col).get,
+        column.get(`type`),
+        sAvro, sd, len)
+      schemaMap.+=((name, f))
+    }
+    val rKey = RowKey(map.get(rowKey).get.asInstanceOf[String])
+    HBaseTableCatalog(nSpace, tName, rKey, SchemaMap(schemaMap), parameters)
+  }
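+
+  // A minimal sketch of a catalog for a hypothetical table "t1" with a single string row key
+  // and one double column, passed to apply() under the tableCatalog key:
+  //
+  //   val cat = s"""{
+  //                 |"table":{"namespace":"default", "name":"t1"},
+  //                 |"rowkey":"key",
+  //                 |"columns":{
+  //                 |"col0":{"cf":"rowkey", "col":"key", "type":"string"},
+  //                 |"col1":{"cf":"cf1", "col":"q1", "type":"double"}
+  //                 |}
+  //                 |}""".stripMargin
+  //   val catalog = HBaseTableCatalog(Map(HBaseTableCatalog.tableCatalog -> cat))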
+
+  val TABLE_KEY: String = "hbase.table"
+  val SCHEMA_COLUMNS_MAPPING_KEY: String = "hbase.columns.mapping"
+
+  /* For backward compatibility. Converts the old definition to the new json based definition formatted as below:
+    val catalog = s"""{
+                      |"table":{"namespace":"default", "name":"htable"},
+                      |"rowkey":"key1:key2",
+                      |"columns":{
+                      |"col1":{"cf":"rowkey", "col":"key1", "type":"string"},
+                      |"col2":{"cf":"rowkey", "col":"key2", "type":"double"},
+                      |"col3":{"cf":"cf1", "col":"col2", "type":"binary"},
+                      |"col4":{"cf":"cf1", "col":"col3", "type":"timestamp"},
+                      |"col5":{"cf":"cf1", "col":"col4", "type":"double", "serdes":"${classOf[DoubleSerDes].getName}"},
+                      |"col6":{"cf":"cf1", "col":"col5", "type":"$map"},
+                      |"col7":{"cf":"cf1", "col":"col6", "type":"$array"},
+                      |"col8":{"cf":"cf1", "col":"col7", "type":"$arrayMap"}
+                      |}
+                      |}""".stripMargin
+   */
+  @deprecated("Please use new json format to define HBaseCatalog")
+  // TODO: There is no need to deprecate since this is the first release.
+  def convert(parameters: Map[String, String]): Map[String, String] = {
+    val tableName = parameters.get(TABLE_KEY).getOrElse(null)
+    // if the hbase.table is not defined, we assume it is json format already.
+    if (tableName == null) return parameters
+    val schemaMappingString = parameters.getOrElse(SCHEMA_COLUMNS_MAPPING_KEY, "")
+    import scala.collection.JavaConverters._
+    val schemaMap = generateSchemaMappingMap(schemaMappingString).asScala.map(_._2.asInstanceOf[SchemaQualifierDefinition])
+
+    val rowkey = schemaMap.filter {
+      _.columnFamily == "rowkey"
+    }.map(_.columnName)
+    val cols = schemaMap.map { x =>
+      s""""${x.columnName}":{"cf":"${x.columnFamily}", "col":"${x.qualifier}", "type":"${x.colType}"}""".stripMargin
+    }
+    val jsonCatalog =
+      s"""{
+         |"table":{"namespace":"default", "name":"${tableName}"},
+         |"rowkey":"${rowkey.mkString(":")}",
+         |"columns":{
+         |${cols.mkString(",")}
+         |}
+         |}
+       """.stripMargin
+    parameters ++ Map(HBaseTableCatalog.tableCatalog->jsonCatalog)
+  }
+
+  /**
+    * Reads the SCHEMA_COLUMNS_MAPPING_KEY and converts it to a map of
+    * SchemaQualifierDefinitions with the original sql column name as the key
+    *
+    * @param schemaMappingString The schema mapping string from the SparkSQL map
+    * @return                    A map of definitions keyed by the SparkSQL column name
+    */
+  @InterfaceAudience.Private
+  def generateSchemaMappingMap(schemaMappingString:String):
+  java.util.HashMap[String, SchemaQualifierDefinition] = {
+    try {
+      val columnDefinitions = schemaMappingString.split(',')
+      val resultingMap = new java.util.HashMap[String, SchemaQualifierDefinition]()
+      columnDefinitions.map(cd => {
+        val parts = cd.trim.split(' ')
+
+        //Make sure we get three parts
+        //<ColumnName> <ColumnType> <ColumnFamily:Qualifier>
+        if (parts.length == 3) {
+          val hbaseDefinitionParts = if (parts(2).charAt(0) == ':') {
+            Array[String]("rowkey", parts(0))
+          } else {
+            parts(2).split(':')
+          }
+          resultingMap.put(parts(0), new SchemaQualifierDefinition(parts(0),
+            parts(1), hbaseDefinitionParts(0), hbaseDefinitionParts(1)))
+        } else {
+          throw new IllegalArgumentException("Invalid value for schema mapping '" + cd +
+            "' should be '<columnName> <columnType> <columnFamily>:<qualifier>' " +
+            "for columns and '<columnName> <columnType> :<qualifier>' for rowKeys")
+        }
+      })
+      resultingMap
+    } catch {
+      case e:Exception => throw
+        new IllegalArgumentException("Invalid value for " + SCHEMA_COLUMNS_MAPPING_KEY +
+          " '" +
+          schemaMappingString + "'", e )
+    }
+  }
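+
+  // A minimal sketch of the old-style mapping string handled above (hypothetical columns):
+  // row keys use the ":<qualifier>" form, regular columns use "<family>:<qualifier>":
+  //
+  //   val mapping = "KEY_FIELD STRING :key, A_FIELD STRING c:a, B_FIELD STRING c:b"
+  //   val defs = HBaseTableCatalog.generateSchemaMappingMap(mapping)
+  //   // defs.get("A_FIELD").columnFamily == "c" and defs.get("A_FIELD").qualifier == "a"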
+}
+
+/**
+  * Construct to contain column data that spans SparkSQL and HBase.
+  *
+  * @param columnName   SparkSQL column name
+  * @param colType      SparkSQL column type
+  * @param columnFamily HBase column family
+  * @param qualifier    HBase qualifier name
+  */
+@InterfaceAudience.Private
+case class SchemaQualifierDefinition(columnName:String,
+    colType:String,
+    columnFamily:String,
+    qualifier:String)
diff --git a/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala b/spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
new file mode 100644 (file)
index 0000000..6c06811
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark.datasources
+
+import java.util.ArrayList
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client._
+import org.apache.hadoop.hbase.spark._
+import org.apache.hadoop.hbase.spark.hbase._
+import org.apache.hadoop.hbase.spark.datasources.HBaseResources._
+import org.apache.hadoop.hbase.util.ShutdownHookManager
+import org.apache.spark.{SparkEnv, TaskContext, Partition}
+import org.apache.spark.rdd.RDD
+
+import scala.collection.mutable
+
+@InterfaceAudience.Private
+class HBaseTableScanRDD(relation: HBaseRelation,
+                       val hbaseContext: HBaseContext,
+                       @transient val filter: Option[SparkSQLPushDownFilter] = None,
+                        val columns: Seq[Field] = Seq.empty
+     ) extends RDD[Result](relation.sqlContext.sparkContext, Nil)
+  {
+  private def sparkConf = SparkEnv.get.conf
+  @transient var ranges = Seq.empty[Range]
+  @transient var points = Seq.empty[Array[Byte]]
+  def addPoint(p: Array[Byte]) {
+    points :+= p
+  }
+
+  def addRange(r: ScanRange) = {
+    val lower = if (r.lowerBound != null && r.lowerBound.length > 0) {
+      Some(Bound(r.lowerBound, r.isLowerBoundEqualTo))
+    } else {
+      None
+    }
+    val upper = if (r.upperBound != null && r.upperBound.length > 0) {
+      if (!r.isUpperBoundEqualTo) {
+        Some(Bound(r.upperBound, false))
+      } else {
+
+        // HBase stopRow is exclusive, so it DOESN'T act like isUpperBoundEqualTo
+        // by default.  Appending one more byte to the stopRow key makes the original
+        // upper bound itself fall inside the scan.
+        val newArray = new Array[Byte](r.upperBound.length + 1)
+        System.arraycopy(r.upperBound, 0, newArray, 0, r.upperBound.length)
+
+        // Append ByteMin so newArray is the immediate successor of the original upper bound
+        newArray(r.upperBound.length) = ByteMin
+        Some(Bound(newArray, false))
+      }
+    } else {
+      None
+    }
+    ranges :+= Range(lower, upper)
+  }
+
+  override def getPartitions: Array[Partition] = {
+    val regions = RegionResource(relation)
+    var idx = 0
+    logDebug(s"There are ${regions.size} regions")
+    val ps = regions.flatMap { x =>
+      val rs = Ranges.and(Range(x), ranges)
+      val ps = Points.and(Range(x), points)
+      if (rs.size > 0 || ps.size > 0) {
+        if(log.isDebugEnabled) {
+          rs.foreach(x => logDebug(x.toString))
+        }
+        idx += 1
+        Some(HBaseScanPartition(idx - 1, x, rs, ps, SerializedFilter.toSerializedTypedFilter(filter)))
+      } else {
+        None
+      }
+    }.toArray
+    regions.release()
+    ShutdownHookManager.affixShutdownHook( new Thread() {
+      override def run() {
+        HBaseConnectionCache.close()
+      }
+    }, 0)
+    ps.asInstanceOf[Array[Partition]]
+  }
+
+  override def getPreferredLocations(split: Partition): Seq[String] = {
+    split.asInstanceOf[HBaseScanPartition].regions.server.map {
+      identity
+    }.toSeq
+  }
+
+  private def buildGets(
+      tbr: TableResource,
+      g: Seq[Array[Byte]],
+      filter: Option[SparkSQLPushDownFilter],
+      columns: Seq[Field],
+      hbaseContext: HBaseContext): Iterator[Result] = {
+    g.grouped(relation.bulkGetSize).flatMap{ x =>
+      val gets = new ArrayList[Get](x.size)
+      x.foreach{ y =>
+        val g = new Get(y)
+        handleTimeSemantics(g)
+        columns.foreach { d =>
+          if (!d.isRowKey) {
+            g.addColumn(d.cfBytes, d.colBytes)
+          }
+        }
+        filter.foreach(g.setFilter(_))
+        gets.add(g)
+      }
+      hbaseContext.applyCreds()
+      val tmp = tbr.get(gets)
+      rddResources.addResource(tmp)
+      toResultIterator(tmp)
+    }
+  }
+
+  private def toResultIterator(result: GetResource): Iterator[Result] = {
+    val iterator = new Iterator[Result] {
+      var idx = 0
+      var cur: Option[Result] = None
+      override def hasNext: Boolean = {
+        while(idx < result.length && cur.isEmpty) {
+          val r = result(idx)
+          idx += 1
+          if (!r.isEmpty) {
+            cur = Some(r)
+          }
+        }
+        if (cur.isEmpty) {
+          rddResources.release(result)
+        }
+        cur.isDefined