HIVE-18797 : ExprConstNodeDesc's getExprString should put appropriate qualifier with...
[hive.git] ql/src/test/results/clientpositive/druid_basic2.q.out
PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "wikipedia")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@druid_table_1
POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "wikipedia")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@druid_table_1
PREHOOK: query: DESCRIBE FORMATTED druid_table_1
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@druid_table_1
POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@druid_table_1
# col_name              data_type               comment
__time                  timestamp with local time zone  from deserializer
robot                   string                  from deserializer
namespace               string                  from deserializer
anonymous               string                  from deserializer
unpatrolled             string                  from deserializer
page                    string                  from deserializer
language                string                  from deserializer
newpage                 string                  from deserializer
user                    string                  from deserializer
count                   float                   from deserializer
added                   float                   from deserializer
delta                   float                   from deserializer
variation               float                   from deserializer
deleted                 float                   from deserializer

# Detailed Table Information
Database:               default
#### A masked pattern was here ####
Retention:              0
#### A masked pattern was here ####
Table Type:             EXTERNAL_TABLE
Table Parameters:
        COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}}
        EXTERNAL                TRUE
        druid.datasource        wikipedia
        numFiles                0
        numRows                 0
        rawDataSize             0
        storage_handler         org.apache.hadoop.hive.druid.QTestDruidStorageHandler
        totalSize               0
#### A masked pattern was here ####

# Storage Information
SerDe Library:          org.apache.hadoop.hive.druid.QTestDruidSerDe
InputFormat:            null
OutputFormat:           null
Compressed:             No
Num Buckets:            -1
Bucket Columns:         []
Sort Columns:           []
Storage Desc Params:
        serialization.format    1
PREHOOK: query: EXPLAIN EXTENDED
SELECT robot FROM druid_table_1
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT robot FROM druid_table_1
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        TableScan
          alias: druid_table_1
          properties:
            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
            druid.query.type select
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          GatherStats: false
          Select Operator
            expressions: robot (type: string)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            ListSink

PREHOOK: query: EXPLAIN EXTENDED
SELECT delta FROM druid_table_1
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT delta FROM druid_table_1
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        TableScan
          alias: druid_table_1
          properties:
            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["delta"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
            druid.query.type select
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          GatherStats: false
          Select Operator
            expressions: delta (type: float)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            ListSink

PREHOOK: query: EXPLAIN EXTENDED
SELECT robot
FROM druid_table_1
WHERE language = 'en'
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT robot
FROM druid_table_1
WHERE language = 'en'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        TableScan
          alias: druid_table_1
          properties:
            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
            druid.query.type select
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          GatherStats: false
          Select Operator
            expressions: robot (type: string)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            ListSink

PREHOOK: query: EXPLAIN EXTENDED
SELECT DISTINCT robot
FROM druid_table_1
WHERE language = 'en'
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT DISTINCT robot
FROM druid_table_1
WHERE language = 'en'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        TableScan
          alias: druid_table_1
          properties:
            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
            druid.query.type groupBy
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          GatherStats: false
          Select Operator
            expressions: robot (type: string)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            ListSink

PREHOOK: query: EXPLAIN EXTENDED
SELECT a.robot, b.language
FROM
(
  (SELECT robot, language
  FROM druid_table_1) a
  JOIN
  (SELECT language
  FROM druid_table_1) b
  ON a.language = b.language
)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT a.robot, b.language
FROM
(
  (SELECT robot, language
  FROM druid_table_1) a
  JOIN
  (SELECT language
  FROM druid_table_1) b
  ON a.language = b.language
)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: druid_table_1
            properties:
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["robot","language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            GatherStats: false
            Select Operator
              expressions: robot (type: string), language (type: string)
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Reduce Output Operator
                key expressions: _col1 (type: string)
                null sort order: a
                sort order: +
                Map-reduce partition columns: _col1 (type: string)
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                tag: 0
                value expressions: _col0 (type: string)
                auto parallelism: false
          TableScan
            alias: druid_table_1
            properties:
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            GatherStats: false
            Reduce Output Operator
              key expressions: language (type: string)
              null sort order: a
              sort order: +
              Map-reduce partition columns: language (type: string)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              tag: 1
              auto parallelism: false
      Path -> Alias:
#### A masked pattern was here ####
      Path -> Partition:
#### A masked pattern was here ####
          Partition
            base file name: druid_table_1
            input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
            output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
            properties:
              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
              EXTERNAL TRUE
              bucket_count -1
              column.name.delimiter ,
              columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
              columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
              columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
              druid.datasource wikipedia
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["robot","language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
#### A masked pattern was here ####
              name default.druid_table_1
              numFiles 0
              numRows 0
              rawDataSize 0
              serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
              storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
              totalSize 0
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.druid.QTestDruidSerDe

              input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
              output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
              properties:
                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
                EXTERNAL TRUE
                bucket_count -1
                column.name.delimiter ,
                columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
                columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
                druid.datasource wikipedia
                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
                druid.query.type select
#### A masked pattern was here ####
                name default.druid_table_1
                numFiles 0
                numRows 0
                rawDataSize 0
                serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
                storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
                totalSize 0
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
              name: default.druid_table_1
            name: default.druid_table_1
      Truncated Path -> Alias:
        /druid_table_1 [$hdt$_0:druid_table_1, druid_table_1]
      Needs Tagging: true
      Reduce Operator Tree:
        Join Operator
          condition map:
               Inner Join 0 to 1
          keys:
            0 _col1 (type: string)
            1 language (type: string)
          outputColumnNames: _col0, _col3
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          Select Operator
            expressions: _col0 (type: string), _col3 (type: string)
            outputColumnNames: _col0, _col1
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            File Output Operator
              compressed: false
              GlobalTableId: 0
#### A masked pattern was here ####
              NumFilesPerFileSink: 1
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
#### A masked pattern was here ####
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                  properties:
                    columns _col0,_col1
                    columns.types string:string
                    escape.delim \
                    hive.serialization.extend.additional.nesting.levels true
                    serialization.escape.crlf true
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              TotalFiles: 1
              GatherStats: false
              MultiFileSpray: false

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

Warning: Shuffle Join JOIN[5][tables = [druid_table_1, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN EXTENDED
SELECT a.robot, b.language
FROM
(
  (SELECT robot, language
  FROM druid_table_1
  WHERE language = 'en') a
  JOIN
  (SELECT language
  FROM druid_table_1) b
  ON a.language = b.language
)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT a.robot, b.language
FROM
(
  (SELECT robot, language
  FROM druid_table_1
  WHERE language = 'en') a
  JOIN
  (SELECT language
  FROM druid_table_1) b
  ON a.language = b.language
)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: druid_table_1
            properties:
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
            GatherStats: false
            Select Operator
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
              Reduce Output Operator
                null sort order: 
                sort order: 
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                tag: 1
                auto parallelism: false
          TableScan
            alias: druid_table_1
            properties:
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            GatherStats: false
            Reduce Output Operator
              null sort order: 
              sort order: 
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              tag: 0
              value expressions: robot (type: string)
              auto parallelism: false
      Path -> Alias:
#### A masked pattern was here ####
      Path -> Partition:
#### A masked pattern was here ####
          Partition
            base file name: druid_table_1
            input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
            output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
            properties:
              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
              EXTERNAL TRUE
              bucket_count -1
              column.name.delimiter ,
              columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
              columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
              columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
              druid.datasource wikipedia
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
#### A masked pattern was here ####
              name default.druid_table_1
              numFiles 0
              numRows 0
              rawDataSize 0
              serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
              storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
              totalSize 0
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.druid.QTestDruidSerDe

              input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
              output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
              properties:
                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
                EXTERNAL TRUE
                bucket_count -1
                column.name.delimiter ,
                columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
                columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
                druid.datasource wikipedia
                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
                druid.query.type select
#### A masked pattern was here ####
                name default.druid_table_1
                numFiles 0
                numRows 0
                rawDataSize 0
                serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
                storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
                totalSize 0
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
              name: default.druid_table_1
            name: default.druid_table_1
      Truncated Path -> Alias:
        /druid_table_1 [$hdt$_0:druid_table_1, druid_table_1]
      Needs Tagging: true
      Reduce Operator Tree:
        Join Operator
          condition map:
               Inner Join 0 to 1
          keys:
            0 
            1 
          outputColumnNames: _col1
          Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE
          Select Operator
            expressions: _col1 (type: string), 'en' (type: string)
            outputColumnNames: _col0, _col1
            Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE
            File Output Operator
              compressed: false
              GlobalTableId: 0
#### A masked pattern was here ####
              NumFilesPerFileSink: 1
              Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE
#### A masked pattern was here ####
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                  properties:
                    columns _col0,_col1
                    columns.types string:string
                    escape.delim \
                    hive.serialization.extend.additional.nesting.levels true
                    serialization.escape.crlf true
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              TotalFiles: 1
              GatherStats: false
              MultiFileSpray: false

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: EXPLAIN EXTENDED
SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
FROM druid_table_1
GROUP BY robot, language, floor_day(`__time`)
ORDER BY CAST(robot AS INTEGER) ASC, m DESC
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
FROM druid_table_1
GROUP BY robot, language, floor_day(`__time`)
ORDER BY CAST(robot AS INTEGER) ASC, m DESC
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: druid_table_1
            properties:
              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
              druid.query.type groupBy
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            GatherStats: false
            Select Operator
              expressions: robot (type: string), floor_day (type: timestamp with local time zone), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Reduce Output Operator
                key expressions: _col4 (type: int), _col2 (type: float)
                null sort order: az
                sort order: +-
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                tag: -1
                TopN: 10
                TopN Hash Memory Usage: 0.1
                value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: float)
                auto parallelism: false
      Path -> Alias:
#### A masked pattern was here ####
      Path -> Partition:
#### A masked pattern was here ####
          Partition
            base file name: druid_table_1
            input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
            output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
            properties:
              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
              EXTERNAL TRUE
              bucket_count -1
              column.name.delimiter ,
              columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
              columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
              columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
              druid.datasource wikipedia
              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
              druid.query.type groupBy
#### A masked pattern was here ####
              name default.druid_table_1
              numFiles 0
              numRows 0
              rawDataSize 0
              serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
              storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
              totalSize 0
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.druid.QTestDruidSerDe

              input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
              output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
              properties:
                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
                EXTERNAL TRUE
                bucket_count -1
                column.name.delimiter ,
                columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
                columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
                druid.datasource wikipedia
                druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
                druid.query.type groupBy
#### A masked pattern was here ####
                name default.druid_table_1
                numFiles 0
                numRows 0
                rawDataSize 0
                serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
                storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
                totalSize 0
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
              name: default.druid_table_1
            name: default.druid_table_1
      Truncated Path -> Alias:
        /druid_table_1 [$hdt$_0:druid_table_1]
      Needs Tagging: false
      Reduce Operator Tree:
        Select Operator
          expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp with local time zone), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: float)
          outputColumnNames: _col0, _col1, _col2, _col3
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          Limit
            Number of rows: 10
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            File Output Operator
              compressed: false
              GlobalTableId: 0
#### A masked pattern was here ####
              NumFilesPerFileSink: 1
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
#### A masked pattern was here ####
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                  properties:
                    columns _col0,_col1,_col2,_col3
                    columns.types string:timestamp with local time zone:float:float
                    escape.delim \
                    hive.serialization.extend.additional.nesting.levels true
                    serialization.escape.crlf true
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              TotalFiles: 1
              GatherStats: false
              MultiFileSpray: false

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: EXPLAIN
SELECT substring(namespace, CAST(deleted AS INT), 4)
FROM druid_table_1
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT substring(namespace, CAST(deleted AS INT), 4)
FROM druid_table_1
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: druid_table_1
            properties:
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["namespace"],"metrics":["deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Select Operator
              expressions: substring(namespace, UDFToInteger(deleted), 4) (type: string)
              outputColumnNames: _col0
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              File Output Operator
                compressed: false
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
FROM druid_table_1
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
FROM druid_table_1
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-2 depends on stages: Stage-1
  Stage-0 depends on stages: Stage-2

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: druid_table_1
            filterExpr: floor_day(__time) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
            properties:
              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
              druid.query.type select
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: floor_day(__time) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: robot (type: string), floor_day(__time) (type: timestamp with local time zone)
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                Group By Operator
                  keys: _col0 (type: string), _col1 (type: timestamp with local time zone)
                  mode: hash
                  outputColumnNames: _col0, _col1
                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: timestamp with local time zone)
                    sort order: ++
                    Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp with local time zone)
                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                    TopN Hash Memory Usage: 0.1
      Reduce Operator Tree:
        Group By Operator
          keys: KEY._col0 (type: string), KEY._col1 (type: timestamp with local time zone)
          mode: mergepartial
          outputColumnNames: _col0, _col1
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          File Output Operator
            compressed: false
            table:
                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe

  Stage: Stage-2
    Map Reduce
      Map Operator Tree:
          TableScan
            Reduce Output Operator
              key expressions: _col0 (type: string)
              sort order: +
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              TopN Hash Memory Usage: 0.1
              value expressions: _col1 (type: timestamp with local time zone)
      Reduce Operator Tree:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp with local time zone)
          outputColumnNames: _col0, _col1
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          Limit
            Number of rows: 10
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            File Output Operator
              compressed: false
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: 10
      Processor Tree:
        ListSink

PREHOOK: query: EXPLAIN
SELECT robot, `__time`
FROM druid_table_1
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, `__time`
ORDER BY robot
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT robot, `__time`
FROM druid_table_1
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, `__time`
ORDER BY robot
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: druid_table_1
            filterExpr: floor_day(extract) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
            properties:
              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
              druid.query.type groupBy
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: floor_day(extract) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: robot (type: string), extract (type: timestamp with local time zone)
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string)
                  sort order: +
                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                  TopN Hash Memory Usage: 0.1
                  value expressions: _col1 (type: timestamp with local time zone)
      Reduce Operator Tree:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp with local time zone)
          outputColumnNames: _col0, _col1
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          Limit
            Number of rows: 10
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            File Output Operator
              compressed: false
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: 10
      Processor Tree:
        ListSink

PREHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
FROM druid_table_1
WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
FROM druid_table_1
WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        TableScan
          alias: druid_table_1
          properties:
            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]}
            druid.query.type groupBy
          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
          Select Operator
            expressions: robot (type: string), floor_day (type: timestamp with local time zone)
            outputColumnNames: _col0, _col1
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            ListSink

896 PREHOOK: query: EXPLAIN EXTENDED
897 SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
898 FROM druid_table_1
899 GROUP BY robot, language, floor_day(`__time`)
900 ORDER BY CAST(robot AS INTEGER) ASC, m DESC
901 LIMIT 10
902 PREHOOK: type: QUERY
903 POSTHOOK: query: EXPLAIN EXTENDED
904 SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
905 FROM druid_table_1
906 GROUP BY robot, language, floor_day(`__time`)
907 ORDER BY CAST(robot AS INTEGER) ASC, m DESC
908 LIMIT 10
909 POSTHOOK: type: QUERY
910 STAGE DEPENDENCIES:
911   Stage-1 is a root stage
912   Stage-2 depends on stages: Stage-1
913   Stage-0 depends on stages: Stage-2
914
915 STAGE PLANS:
916   Stage: Stage-1
917     Map Reduce
918       Map Operator Tree:
919           TableScan
920             alias: druid_table_1
921             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
922             GatherStats: false
923             Select Operator
924               expressions: __time (type: timestamp with local time zone), robot (type: string), language (type: string), added (type: float), delta (type: float)
925               outputColumnNames: __time, robot, language, added, delta
926               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
927               Group By Operator
928                 aggregations: max(added), sum(delta)
929                 keys: robot (type: string), language (type: string), floor_day(__time) (type: timestamp with local time zone)
930                 mode: hash
931                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
932                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
933                 Reduce Output Operator
934                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp with local time zone)
935                   null sort order: aaa
936                   sort order: +++
937                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp with local time zone)
938                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
939                   tag: -1
940                   value expressions: _col3 (type: float), _col4 (type: double)
941                   auto parallelism: false
942       Path -> Alias:
943 #### A masked pattern was here ####
944       Path -> Partition:
945 #### A masked pattern was here ####
946           Partition
947             base file name: druid_table_1
948             input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
949             output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
950             properties:
951               COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
952               EXTERNAL TRUE
953               bucket_count -1
954               column.name.delimiter ,
955               columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
956               columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
957               columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
958               druid.datasource wikipedia
959 #### A masked pattern was here ####
960               name default.druid_table_1
961               numFiles 0
962               numRows 0
963               rawDataSize 0
964               serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
965               serialization.format 1
966               serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
967               storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
968               totalSize 0
969 #### A masked pattern was here ####
970             serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
971           
972               input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
973               output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
974               properties:
975                 COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
976                 EXTERNAL TRUE
977                 bucket_count -1
978                 column.name.delimiter ,
979                 columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
980                 columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
981                 columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
982                 druid.datasource wikipedia
983 #### A masked pattern was here ####
984                 name default.druid_table_1
985                 numFiles 0
986                 numRows 0
987                 rawDataSize 0
988                 serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
989                 serialization.format 1
990                 serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
991                 storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
992                 totalSize 0
993 #### A masked pattern was here ####
994               serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
995               name: default.druid_table_1
996             name: default.druid_table_1
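EXPLAIN EXTENDED prints the descriptor twice -- once for the resolved partition and once for the owning table; for this unpartitioned external table the two property blocks are identical and both resolve to default.druid_table_1. The same metadata can be read back outside of EXPLAIN; a sketch using standard Hive statements (output not shown, and not taken from this test run):

    -- Both statements exist in stock Hive; the property names match the
    -- properties block printed by the plan above.
    SHOW TBLPROPERTIES druid_table_1;
    SHOW CREATE TABLE druid_table_1;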
997       Truncated Path -> Alias:
998         /druid_table_1 [druid_table_1]
999       Needs Tagging: false
1000       Reduce Operator Tree:
1001         Group By Operator
1002           aggregations: max(VALUE._col0), sum(VALUE._col1)
1003           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: timestamp with local time zone)
1004           mode: mergepartial
1005           outputColumnNames: _col0, _col1, _col2, _col3, _col4
1006           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
1007           Select Operator
1008             expressions: _col0 (type: string), _col2 (type: timestamp with local time zone), _col3 (type: float), _col4 (type: double)
1009             outputColumnNames: _col0, _col1, _col2, _col3
1010             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
1011             File Output Operator
1012               compressed: false
1013               GlobalTableId: 0
1014 #### A masked pattern was here ####
1015               NumFilesPerFileSink: 1
1016               table:
1017                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
1018                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
1019                   properties:
1020                     column.name.delimiter ,
1021                     columns _col0,_col1,_col2,_col3
1022                     columns.types string,timestamp with local time zone,float,double
1023                     escape.delim \
1024                     serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
1025                   serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
1026               TotalFiles: 1
1027               GatherStats: false
1028               MultiFileSpray: false
1029
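Stage-1's reducer completes the two-phase aggregation: mode: mergepartial combines the map-side partials under the same three keys, and the Select Operator then drops the language key (_col1) before writing the intermediate result. The output types follow standard Hive semantics rather than anything Druid-specific: MAX preserves its input type while SUM over FLOAT widens to DOUBLE. A sketch of that behavior (hypothetical standalone query, not from this test):

    -- max(float) -> float  (the plan's "_col3 (type: float)")
    -- sum(float) -> double (the plan's "_col4 (type: double)")
    SELECT max(added), sum(delta) FROM druid_table_1;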
1030   Stage: Stage-2
1031     Map Reduce
1032       Map Operator Tree:
1033           TableScan
1034             GatherStats: false
1035             Reduce Output Operator
1036               key expressions: UDFToInteger(_col0) (type: int), _col2 (type: float)
1037               null sort order: az
1038               sort order: +-
1039               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
1040               tag: -1
1041               TopN: 10
1042               TopN Hash Memory Usage: 0.1
1043               value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: double)
1044               auto parallelism: false
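Stage-2 re-sorts the aggregated rows for the final ordering: ascending on UDFToInteger(_col0) (the robot key cast to int) and descending on the float aggregate (sort order: +-; null sort order: az puts nulls first on the ascending key and last on the descending one). TopN: 10 means the limit has been pushed into this Reduce Output Operator, so each mapper keeps only its ten best rows in a bounded hash before shuffling. The 0.1 figure is the standard knob for that buffer -- shown as a sketch, not a setting issued by this test:

    -- Fraction of mapper memory the ReduceSink may spend tracking the top-N
    -- rows; the value matches "TopN Hash Memory Usage: 0.1" above.
    SET hive.limit.pushdown.memory.usage=0.1;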
1045       Path -> Alias:
1046 #### A masked pattern was here ####
1047       Path -> Partition:
1048 #### A masked pattern was here ####
1049           Partition
1050             base file name: -mr-10003
1051             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
1052             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
1053             properties:
1054               column.name.delimiter ,
1055               columns _col0,_col1,_col2,_col3
1056               columns.types string,timestamp with local time zone,float,double
1057               escape.delim \
1058               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
1059             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
1060           
1061               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
1062               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
1063               properties:
1064                 column.name.delimiter ,
1065                 columns _col0,_col1,_col2,_col3
1066                 columns.types string,timestamp with local time zone,float,double
1067                 escape.delim \
1068                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
1069               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
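The partition descriptor here shows how multi-stage MapReduce plans chain together: Stage-2 does not read the Druid table again but consumes Stage-1's temporary output (base file name: -mr-10003) as an uncompressed SequenceFile serialized with LazyBinarySerDe. Whether such stage-to-stage files are compressed is governed by a standard setting -- a sketch, not something this test sets:

    -- Controls compression of intermediate files passed between MR stages;
    -- false matches the "compressed: false" on Stage-1's File Output Operator.
    SET hive.exec.compress.intermediate=false;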
1070       Truncated Path -> Alias:
1071 #### A masked pattern was here ####
1072       Needs Tagging: false
1073       Reduce Operator Tree:
1074         Select Operator
1075           expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp with local time zone), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: double)
1076           outputColumnNames: _col0, _col1, _col2, _col3
1077           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
1078           Limit
1079             Number of rows: 10
1080             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
1081             File Output Operator
1082               compressed: false
1083               GlobalTableId: 0
1084 #### A masked pattern was here ####
1085               NumFilesPerFileSink: 1
1086               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
1087 #### A masked pattern was here ####
1088               table:
1089                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
1090                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
1091                   properties:
1092                     columns _col0,_col1,_col2,_col3
1093                     columns.types string:timestamp with local time zone:float:double
1094                     escape.delim \
1095                     hive.serialization.extend.additional.nesting.levels true
1096                     serialization.escape.crlf true
1097                     serialization.format 1
1098                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
1099                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
1100               TotalFiles: 1
1101               GatherStats: false
1102               MultiFileSpray: false
1103
1104   Stage: Stage-0
1105     Fetch Operator
1106       limit: 10
1107       Processor Tree:
1108         ListSink
1109
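Reading the three stages together -- partial and merged aggregation in Stage-1, top-N sort in Stage-2, and a client-side fetch of at most 10 rows in Stage-0's ListSink -- the plan corresponds to a query of roughly the following shape. This is a reconstruction for orientation only, not the literal statement from druid_basic2.q:

    EXPLAIN EXTENDED
    SELECT robot, floor_day(`__time`) AS d, max(added) AS m, sum(delta) AS s
    FROM druid_table_1
    GROUP BY robot, language, floor_day(`__time`)
    ORDER BY CAST(robot AS INT) ASC, m DESC
    LIMIT 10;

The grouping on language with no corresponding output column, the integer cast in the first sort key, and the descending float key are all visible directly in the operator trees above.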