HIVE-18797 : ExprConstNodeDesc's getExprString should put appropriate qualifier with...
hive.git: ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out
PREHOOK: query: CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T1_text
POSTHOOK: query: CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T1_text
PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text
PREHOOK: type: LOAD
#### A masked pattern was here ####
PREHOOK: Output: default@t1_text
POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@t1_text
PREHOOK: query: CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@t1_text
PREHOOK: Output: database:default
PREHOOK: Output: default@T1
POSTHOOK: query: CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@t1_text
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T1
POSTHOOK: Lineage: t1.a SIMPLE [(t1_text)t1_text.FieldSchema(name:a, type:string, comment:null), ]
POSTHOOK: Lineage: t1.b SIMPLE [(t1_text)t1_text.FieldSchema(name:b, type:string, comment:null), ]
POSTHOOK: Lineage: t1.c SIMPLE [(t1_text)t1_text.FieldSchema(name:c, type:string, comment:null), ]
t1_text.a       t1_text.b       t1_text.c
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
  enabled: true
  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]

STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Tez
#### A masked pattern was here ####
      Edges:
        Reducer 2 <- Map 1 (SIMPLE_EDGE)
        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
      Vertices:
        Map 1 
            Map Operator Tree:
                TableScan
                  alias: t1
                  Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c:string, 3:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                  Select Operator
                    expressions: a (type: string), b (type: string)
                    outputColumnNames: a, b
                    Select Vectorization:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 1]
                    Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      Group By Vectorization:
                          className: VectorGroupByOperator
                          groupByMode: HASH
                          keyExpressions: col 0:string, col 1:string
                          native: false
                          vectorProcessingMode: HASH
                          projectedOutputColumnNums: []
                      keys: a (type: string), b (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string)
                        sort order: ++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                        Reduce Sink Vectorization:
                            className: VectorReduceSinkMultiKeyOperator
                            keyColumnNums: [0, 1]
                            native: true
                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                            valueColumnNums: []
                        Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
            Execution mode: vectorized, llap
            LLAP IO: all inputs
            Map Vectorization:
                enabled: true
                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                inputFormatFeatureSupport: []
                featureSupportInUse: []
                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 3
                    includeColumns: [0, 1]
                    dataColumns: a:string, b:string, c:string
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
        Reducer 2 
            Execution mode: vectorized, llap
            Reduce Vectorization:
                enabled: true
                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                reduceColumnNullOrder: aa
                reduceColumnSortOrder: ++
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 2
                    dataColumns: KEY._col0:string, KEY._col1:string
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
            Reduce Operator Tree:
              Group By Operator
                Group By Vectorization:
                    className: VectorGroupByOperator
                    groupByMode: MERGEPARTIAL
                    keyExpressions: col 0:string, col 1:string
                    native: false
                    vectorProcessingMode: MERGE_PARTIAL
                    projectedOutputColumnNums: []
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count()
                  Group By Vectorization:
                      aggregators: VectorUDAFCountStar(*) -> bigint
                      className: VectorGroupByOperator
                      groupByMode: HASH
                      keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 2:bigint
                      native: false
                      vectorProcessingMode: HASH
                      projectedOutputColumnNums: [0]
                  keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
                  mode: hash
                  outputColumnNames: _col0, _col1, _col2, _col3
                  Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                    sort order: +++
                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                    Reduce Sink Vectorization:
                        className: VectorReduceSinkMultiKeyOperator
                        keyColumnNums: [0, 1, 2]
                        native: true
                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                        valueColumnNums: [3]
                    Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
                    value expressions: _col3 (type: bigint)
        Reducer 3 
            Execution mode: vectorized, llap
            Reduce Vectorization:
                enabled: true
                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                reduceColumnNullOrder: aaa
                reduceColumnSortOrder: +++
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 4
                    dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(VALUE._col0)
                Group By Vectorization:
                    aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
                    className: VectorGroupByOperator
                    groupByMode: MERGEPARTIAL
                    keyExpressions: col 0:string, col 1:string, col 2:bigint
                    native: false
                    vectorProcessingMode: MERGE_PARTIAL
                    projectedOutputColumnNums: [0]
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col3
                Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                pruneGroupingSetId: true
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
                  Select Vectorization:
                      className: VectorSelectOperator
                      native: true
                      projectedOutputColumnNums: [0, 1, 2]
                  Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    File Sink Vectorization:
                        className: VectorFileSinkOperator
                        native: false
                    Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by cube(a, b)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by cube(a, b)
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
  enabled: true
  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]

STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Tez
#### A masked pattern was here ####
      Edges:
        Reducer 2 <- Map 1 (SIMPLE_EDGE)
        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
      Vertices:
        Map 1 
            Map Operator Tree:
                TableScan
                  alias: t1
                  Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c:string, 3:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                  Select Operator
                    expressions: a (type: string), b (type: string)
                    outputColumnNames: a, b
                    Select Vectorization:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 1]
                    Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      Group By Vectorization:
                          className: VectorGroupByOperator
                          groupByMode: HASH
                          keyExpressions: col 0:string, col 1:string
                          native: false
                          vectorProcessingMode: HASH
                          projectedOutputColumnNums: []
                      keys: a (type: string), b (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string)
                        sort order: ++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                        Reduce Sink Vectorization:
                            className: VectorReduceSinkMultiKeyOperator
                            keyColumnNums: [0, 1]
                            native: true
                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                            valueColumnNums: []
                        Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
            Execution mode: vectorized, llap
            LLAP IO: all inputs
            Map Vectorization:
                enabled: true
                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                inputFormatFeatureSupport: []
                featureSupportInUse: []
                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 3
                    includeColumns: [0, 1]
                    dataColumns: a:string, b:string, c:string
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
        Reducer 2 
            Execution mode: vectorized, llap
            Reduce Vectorization:
                enabled: true
                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                reduceColumnNullOrder: aa
                reduceColumnSortOrder: ++
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 2
                    dataColumns: KEY._col0:string, KEY._col1:string
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
            Reduce Operator Tree:
              Group By Operator
                Group By Vectorization:
                    className: VectorGroupByOperator
                    groupByMode: MERGEPARTIAL
                    keyExpressions: col 0:string, col 1:string
                    native: false
                    vectorProcessingMode: MERGE_PARTIAL
                    projectedOutputColumnNums: []
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count()
                  Group By Vectorization:
                      aggregators: VectorUDAFCountStar(*) -> bigint
                      className: VectorGroupByOperator
                      groupByMode: HASH
                      keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 2:bigint
                      native: false
                      vectorProcessingMode: HASH
                      projectedOutputColumnNums: [0]
                  keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
                  mode: hash
                  outputColumnNames: _col0, _col1, _col2, _col3
                  Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                    sort order: +++
                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                    Reduce Sink Vectorization:
                        className: VectorReduceSinkMultiKeyOperator
                        keyColumnNums: [0, 1, 2]
                        native: true
                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                        valueColumnNums: [3]
                    Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
                    value expressions: _col3 (type: bigint)
        Reducer 3 
            Execution mode: vectorized, llap
            Reduce Vectorization:
                enabled: true
                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                reduceColumnNullOrder: aaa
                reduceColumnSortOrder: +++
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 4
                    dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(VALUE._col0)
                Group By Vectorization:
                    aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
                    className: VectorGroupByOperator
                    groupByMode: MERGEPARTIAL
                    keyExpressions: col 0:string, col 1:string, col 2:bigint
                    native: false
                    vectorProcessingMode: MERGE_PARTIAL
                    projectedOutputColumnNums: [0]
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col3
                Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                pruneGroupingSetId: true
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
                  Select Vectorization:
                      className: VectorSelectOperator
                      native: true
                      projectedOutputColumnNums: [0, 1, 2]
                  Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    File Sink Vectorization:
                        className: VectorFileSinkOperator
                        native: false
                    Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
#### A masked pattern was here ####
POSTHOOK: query: SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
#### A masked pattern was here ####
a       b       _c2
1       1       1
1       NULL    1
2       2       1
2       3       1
2       NULL    2
3       2       1
3       NULL    1
5       2       1
5       NULL    1
8       1       1
8       NULL    1
NULL    1       2
NULL    2       3
NULL    3       1
NULL    NULL    6
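For reference, GROUP BY a, b WITH CUBE aggregates over every subset of the grouping keys: (a, b), (a), (b) and the empty set. That is why the rows above include per-(a, b) counts, subtotal rows with NULL in the rolled-up column, and a NULL/NULL grand total of 6, one for each distinct (a, b) pair produced by subq1. A minimal sketch of the equivalent explicit GROUPING SETS form of the same query, shown only as an illustration:

SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1
GROUP BY a, b GROUPING SETS ((a, b), a, b, ())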
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
  enabled: true
  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]

STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Tez
#### A masked pattern was here ####
      Edges:
        Reducer 2 <- Map 1 (SIMPLE_EDGE)
        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
#### A masked pattern was here ####
      Vertices:
        Map 1 
            Map Operator Tree:
                TableScan
                  alias: t1
                  Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c:string, 3:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                  Select Operator
                    expressions: a (type: string), b (type: string)
                    outputColumnNames: a, b
                    Select Vectorization:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 1]
                    Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      Group By Vectorization:
                          className: VectorGroupByOperator
                          groupByMode: HASH
                          keyExpressions: col 0:string, col 1:string
                          native: false
                          vectorProcessingMode: HASH
                          projectedOutputColumnNums: []
                      keys: a (type: string), b (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string)
                        sort order: ++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                        Reduce Sink Vectorization:
                            className: VectorReduceSinkMultiKeyOperator
                            keyColumnNums: [0, 1]
                            native: true
                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                            valueColumnNums: []
                        Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
            Execution mode: vectorized, llap
            LLAP IO: all inputs
            Map Vectorization:
                enabled: true
                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                inputFormatFeatureSupport: []
                featureSupportInUse: []
                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 3
                    includeColumns: [0, 1]
                    dataColumns: a:string, b:string, c:string
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
        Reducer 2 
            Execution mode: vectorized, llap
            Reduce Vectorization:
                enabled: true
                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                reduceColumnNullOrder: aa
                reduceColumnSortOrder: ++
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 2
                    dataColumns: KEY._col0:string, KEY._col1:string
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
            Reduce Operator Tree:
              Group By Operator
                Group By Vectorization:
                    className: VectorGroupByOperator
                    groupByMode: MERGEPARTIAL
                    keyExpressions: col 0:string, col 1:string
                    native: false
                    vectorProcessingMode: MERGE_PARTIAL
                    projectedOutputColumnNums: []
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count()
                  Group By Vectorization:
                      aggregators: VectorUDAFCountStar(*) -> bigint
                      className: VectorGroupByOperator
                      groupByMode: HASH
                      keyExpressions: col 0:string, col 1:string
                      native: false
                      vectorProcessingMode: HASH
                      projectedOutputColumnNums: [0]
                  keys: _col0 (type: string), _col1 (type: string)
                  mode: hash
                  outputColumnNames: _col0, _col1, _col2
                  Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: string)
                    sort order: ++
                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                    Reduce Sink Vectorization:
                        className: VectorReduceSinkMultiKeyOperator
                        keyColumnNums: [0, 1]
                        native: true
                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                        valueColumnNums: [2]
                    Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
                    value expressions: _col2 (type: bigint)
        Reducer 3 
            Execution mode: vectorized, llap
            Reduce Vectorization:
                enabled: true
                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                reduceColumnNullOrder: aa
                reduceColumnSortOrder: ++
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 3
                    dataColumns: KEY._col0:string, KEY._col1:string, VALUE._col0:bigint
                    partitionColumnCount: 0
                    scratchColumnTypeNames: [bigint]
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(VALUE._col0)
                Group By Vectorization:
                    aggregators: VectorUDAFCountMerge(col 2:bigint) -> bigint
                    className: VectorGroupByOperator
                    groupByMode: PARTIALS
                    keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
                    native: false
                    vectorProcessingMode: STREAMING
                    projectedOutputColumnNums: [0]
                keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
                mode: partials
                outputColumnNames: _col0, _col1, _col2, _col3
                Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                  sort order: +++
                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                  Reduce Sink Vectorization:
                      className: VectorReduceSinkMultiKeyOperator
                      keyColumnNums: [0, 1, 2]
                      native: true
                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                      valueColumnNums: [3]
                  Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col3 (type: bigint)
        Reducer 4 
            Execution mode: vectorized, llap
            Reduce Vectorization:
                enabled: true
                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                reduceColumnNullOrder: aaa
                reduceColumnSortOrder: +++
                allNative: false
                usesVectorUDFAdaptor: false
                vectorized: true
                rowBatchContext:
                    dataColumnCount: 4
                    dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
                    partitionColumnCount: 0
                    scratchColumnTypeNames: []
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(VALUE._col0)
                Group By Vectorization:
                    aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
                    className: VectorGroupByOperator
                    groupByMode: FINAL
                    keyExpressions: col 0:string, col 1:string, col 2:bigint
                    native: false
                    vectorProcessingMode: STREAMING
                    projectedOutputColumnNums: [0]
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
                mode: final
                outputColumnNames: _col0, _col1, _col3
                Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                pruneGroupingSetId: true
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
                  Select Vectorization:
                      className: VectorSelectOperator
                      native: true
                      projectedOutputColumnNums: [0, 1, 2]
                  Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    File Sink Vectorization:
                        className: VectorFileSinkOperator
                        native: false
                    Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
#### A masked pattern was here ####
POSTHOOK: query: SELECT a, b, count(*) FROM
(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
#### A masked pattern was here ####
a       b       _c2
1       1       1
1       NULL    1
2       2       1
2       3       1
2       NULL    2
3       2       1
3       NULL    1
5       2       1
5       NULL    1
8       1       1
8       NULL    1
NULL    1       2
NULL    2       3
NULL    3       1
NULL    NULL    6