Skip to content

Instantly share code, notes, and snippets.

@sameeragarwal
Created March 22, 2016 21:02
Show Gist options
  • Save sameeragarwal/4b46dd53fe79f8e05abd to your computer and use it in GitHub Desktop.
// NOTE(review): Spark Catalyst whole-stage codegen output (compiled by Janino
// at runtime). The /* NNN */ prefixes are the generator's own line markers.
// Pipeline: Partial count(1) over Range(0, 20971520) — counts rows locally
// and emits a single partial-count row.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#69L])
/* 007 */ +- Project
/* 008 */ +- Range 0, 1, 1, 20971520, [id#60L]
/* 009 */ */
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private boolean agg_bufIsNull;
/* 014 */ private long agg_bufValue;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 017 */ private boolean range_initRange;
/* 018 */ private long range_partitionEnd;
/* 019 */ private long range_number;
/* 020 */ private boolean range_overflow;
/* 021 */ private scala.collection.Iterator range_input;
/* 022 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */
/* 028 */ public GeneratedIterator(Object[] references) {
/* 029 */ this.references = references;
/* 030 */ }
/* 031 */
/* 032 */ public void init(scala.collection.Iterator inputs[]) {
/* 033 */ agg_initAgg = false;
/* 034 */
/* 035 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 036 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 037 */ range_initRange = false;
/* 038 */ range_partitionEnd = 0L;
/* 039 */ range_number = 0L;
/* 040 */ range_overflow = false;
/* 041 */ range_input = inputs[0];
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ agg_result = new UnsafeRow(1);
/* 045 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 046 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 047 */ }
/* 048 */
/* 049 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 050 */ // initialize aggregation buffer
/* 051 */ agg_bufIsNull = false;
/* 052 */ agg_bufValue = 0L;
/* 053 */
/* 054 */ /*** PRODUCE: Project */
/* 055 */
/* 056 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#60L] */
/* 057 */
/* 058 */ // initialize Range
/* 059 */ if (!range_initRange) {
/* 060 */ range_initRange = true;
// The single input row carries this task's partition index; it sizes the
// [range_number, range_partitionEnd) slice computed in initRange below.
/* 061 */ if (range_input.hasNext()) {
/* 062 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 063 */ } else {
/* 064 */ return;
/* 065 */ }
/* 066 */ }
/* 067 */
/* 068 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 069 */ long range_value = range_number;
/* 070 */ range_number += 1L;
// Precedence note: '<' binds tighter than '^', so this reads
// (range_number < range_value) ^ (1L < 0). With step 1L the right operand
// is false, i.e. overflow is flagged exactly when the increment wraps.
/* 071 */ if (range_number < range_value ^ 1L < 0) {
/* 072 */ range_overflow = true;
/* 073 */ }
/* 074 */
/* 075 */ /*** CONSUME: Project */
/* 076 */
/* 077 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#69L]) */
/* 078 */
/* 079 */ // do aggregate
/* 080 */ /* (input[0, bigint] + 1) */
/* 081 */ long agg_value1 = -1L;
/* 082 */ agg_value1 = agg_bufValue + 1L;
/* 083 */ // update aggregation buffer
/* 084 */ agg_bufIsNull = false;
/* 085 */ agg_bufValue = agg_value1;
/* 086 */
/* 087 */ }
/* 088 */
/* 089 */ }
/* 090 */
// BigInteger arithmetic avoids 64-bit overflow while computing this slice's
// start/end; the results are clamped to the long range before use.
/* 091 */ private void initRange(int idx) {
/* 092 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 093 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 094 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 095 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 096 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 097 */
/* 098 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 099 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 100 */ range_number = Long.MAX_VALUE;
/* 101 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 102 */ range_number = Long.MIN_VALUE;
/* 103 */ } else {
/* 104 */ range_number = st.longValue();
/* 105 */ }
/* 106 */
/* 107 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 108 */ .multiply(step).add(start);
/* 109 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 110 */ range_partitionEnd = Long.MAX_VALUE;
/* 111 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 112 */ range_partitionEnd = Long.MIN_VALUE;
/* 113 */ } else {
/* 114 */ range_partitionEnd = end.longValue();
/* 115 */ }
/* 116 */
// Records the slice's row count up front (division by the 1L step).
/* 117 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 118 */ }
/* 119 */
// Runs the whole aggregation on first call; the while-loop body executes at
// most once (agg_initAgg flips to true immediately), emitting one row.
/* 120 */ protected void processNext() throws java.io.IOException {
/* 121 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#69L]) */
/* 122 */
/* 123 */ while (!agg_initAgg) {
/* 124 */ agg_initAgg = true;
/* 125 */ agg_doAggregateWithoutKey();
/* 126 */
/* 127 */ // output the result
/* 128 */
/* 129 */ agg_metricValue.add(1);
/* 130 */ agg_rowWriter.zeroOutNullBytes();
/* 131 */
/* 132 */ if (agg_bufIsNull) {
/* 133 */ agg_rowWriter.setNullAt(0);
/* 134 */ } else {
/* 135 */ agg_rowWriter.write(0, agg_bufValue);
/* 136 */ }
/* 137 */ append(agg_result);
/* 138 */ }
/* 139 */ }
/* 140 */ }
// NOTE(review): Spark Catalyst whole-stage codegen output (compiled by Janino
// at runtime). Pipeline: Final count(1) — merges partial counts arriving from
// the shuffle by summing input column 0 into a single output row.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#74L])
/* 007 */ +- INPUT
/* 008 */ */
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private boolean agg_bufIsNull;
/* 013 */ private long agg_bufValue;
/* 014 */ private scala.collection.Iterator inputadapter_input;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */
/* 021 */ public GeneratedIterator(Object[] references) {
/* 022 */ this.references = references;
/* 023 */ }
/* 024 */
/* 025 */ public void init(scala.collection.Iterator inputs[]) {
/* 026 */ agg_initAgg = false;
/* 027 */
/* 028 */ inputadapter_input = inputs[0];
/* 029 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 030 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 031 */ agg_result = new UnsafeRow(1);
/* 032 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 033 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 034 */ }
/* 035 */
/* 036 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 037 */ // initialize aggregation buffer
/* 038 */ agg_bufIsNull = false;
/* 039 */ agg_bufValue = 0L;
/* 040 */
/* 041 */ /*** PRODUCE: INPUT */
/* 042 */
/* 043 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 044 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 045 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#74L]) */
/* 046 */ /* input[0, bigint] */
/* 047 */ long agg_value3 = inputadapter_row.getLong(0);
/* 048 */
/* 049 */ // do aggregate
/* 050 */ /* (input[0, bigint] + input[1, bigint]) */
// NOTE(review): the generated expression comment above mentions input[1],
// but the code sums the running buffer with input column 0 — the buffer
// plays the role of the merge expression's first operand.
/* 051 */ long agg_value4 = -1L;
/* 052 */ agg_value4 = agg_bufValue + agg_value3;
/* 053 */ // update aggregation buffer
/* 054 */ agg_bufIsNull = false;
/* 055 */ agg_bufValue = agg_value4;
/* 056 */ }
/* 057 */
/* 058 */ }
/* 059 */
// Runs the merge on first call; the while-loop body executes at most once
// (agg_initAgg flips to true immediately), emitting the final count row.
/* 060 */ protected void processNext() throws java.io.IOException {
/* 061 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#74L]) */
/* 062 */
/* 063 */ while (!agg_initAgg) {
/* 064 */ agg_initAgg = true;
/* 065 */ agg_doAggregateWithoutKey();
/* 066 */
/* 067 */ // output the result
/* 068 */
/* 069 */ agg_metricValue.add(1);
/* 070 */ agg_rowWriter.zeroOutNullBytes();
/* 071 */
/* 072 */ if (agg_bufIsNull) {
/* 073 */ agg_rowWriter.setNullAt(0);
/* 074 */ } else {
/* 075 */ agg_rowWriter.write(0, agg_bufValue);
/* 076 */ }
/* 077 */ append(agg_result);
/* 078 */ }
/* 079 */ }
/* 080 */ }
// NOTE(review): Spark Catalyst whole-stage codegen output (compiled by Janino
// at runtime). The /* NNN */ prefixes are the generator's own line markers.
// Pipeline: Partial count(1) over Range(0, 20971520) — identical in structure
// to the earlier Partial pipeline, with fresh expression IDs.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#79L])
/* 007 */ +- Project
/* 008 */ +- Range 0, 1, 1, 20971520, [id#70L]
/* 009 */ */
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private boolean agg_bufIsNull;
/* 014 */ private long agg_bufValue;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 017 */ private boolean range_initRange;
/* 018 */ private long range_partitionEnd;
/* 019 */ private long range_number;
/* 020 */ private boolean range_overflow;
/* 021 */ private scala.collection.Iterator range_input;
/* 022 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */
/* 028 */ public GeneratedIterator(Object[] references) {
/* 029 */ this.references = references;
/* 030 */ }
/* 031 */
/* 032 */ public void init(scala.collection.Iterator inputs[]) {
/* 033 */ agg_initAgg = false;
/* 034 */
/* 035 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 036 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 037 */ range_initRange = false;
/* 038 */ range_partitionEnd = 0L;
/* 039 */ range_number = 0L;
/* 040 */ range_overflow = false;
/* 041 */ range_input = inputs[0];
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ agg_result = new UnsafeRow(1);
/* 045 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 046 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 047 */ }
/* 048 */
/* 049 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 050 */ // initialize aggregation buffer
/* 051 */ agg_bufIsNull = false;
/* 052 */ agg_bufValue = 0L;
/* 053 */
/* 054 */ /*** PRODUCE: Project */
/* 055 */
/* 056 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#70L] */
/* 057 */
/* 058 */ // initialize Range
/* 059 */ if (!range_initRange) {
/* 060 */ range_initRange = true;
// The single input row carries this task's partition index; it sizes the
// [range_number, range_partitionEnd) slice computed in initRange below.
/* 061 */ if (range_input.hasNext()) {
/* 062 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 063 */ } else {
/* 064 */ return;
/* 065 */ }
/* 066 */ }
/* 067 */
/* 068 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 069 */ long range_value = range_number;
/* 070 */ range_number += 1L;
// Precedence note: '<' binds tighter than '^', so this reads
// (range_number < range_value) ^ (1L < 0). With step 1L the right operand
// is false, i.e. overflow is flagged exactly when the increment wraps.
/* 071 */ if (range_number < range_value ^ 1L < 0) {
/* 072 */ range_overflow = true;
/* 073 */ }
/* 074 */
/* 075 */ /*** CONSUME: Project */
/* 076 */
/* 077 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#79L]) */
/* 078 */
/* 079 */ // do aggregate
/* 080 */ /* (input[0, bigint] + 1) */
/* 081 */ long agg_value1 = -1L;
/* 082 */ agg_value1 = agg_bufValue + 1L;
/* 083 */ // update aggregation buffer
/* 084 */ agg_bufIsNull = false;
/* 085 */ agg_bufValue = agg_value1;
/* 086 */
/* 087 */ }
/* 088 */
/* 089 */ }
/* 090 */
// BigInteger arithmetic avoids 64-bit overflow while computing this slice's
// start/end; the results are clamped to the long range before use.
/* 091 */ private void initRange(int idx) {
/* 092 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 093 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 094 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 095 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 096 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 097 */
/* 098 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 099 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 100 */ range_number = Long.MAX_VALUE;
/* 101 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 102 */ range_number = Long.MIN_VALUE;
/* 103 */ } else {
/* 104 */ range_number = st.longValue();
/* 105 */ }
/* 106 */
/* 107 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 108 */ .multiply(step).add(start);
/* 109 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 110 */ range_partitionEnd = Long.MAX_VALUE;
/* 111 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 112 */ range_partitionEnd = Long.MIN_VALUE;
/* 113 */ } else {
/* 114 */ range_partitionEnd = end.longValue();
/* 115 */ }
/* 116 */
// Records the slice's row count up front (division by the 1L step).
/* 117 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 118 */ }
/* 119 */
// Runs the whole aggregation on first call; the while-loop body executes at
// most once (agg_initAgg flips to true immediately), emitting one row.
/* 120 */ protected void processNext() throws java.io.IOException {
/* 121 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#79L]) */
/* 122 */
/* 123 */ while (!agg_initAgg) {
/* 124 */ agg_initAgg = true;
/* 125 */ agg_doAggregateWithoutKey();
/* 126 */
/* 127 */ // output the result
/* 128 */
/* 129 */ agg_metricValue.add(1);
/* 130 */ agg_rowWriter.zeroOutNullBytes();
/* 131 */
/* 132 */ if (agg_bufIsNull) {
/* 133 */ agg_rowWriter.setNullAt(0);
/* 134 */ } else {
/* 135 */ agg_rowWriter.write(0, agg_bufValue);
/* 136 */ }
/* 137 */ append(agg_result);
/* 138 */ }
/* 139 */ }
/* 140 */ }
// NOTE(review): Spark Catalyst whole-stage codegen output (compiled by Janino
// at runtime). Pipeline: Final count(1) — merges partial counts arriving from
// the shuffle by summing input column 0 into a single output row.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#84L])
/* 007 */ +- INPUT
/* 008 */ */
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private boolean agg_bufIsNull;
/* 013 */ private long agg_bufValue;
/* 014 */ private scala.collection.Iterator inputadapter_input;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */
/* 021 */ public GeneratedIterator(Object[] references) {
/* 022 */ this.references = references;
/* 023 */ }
/* 024 */
/* 025 */ public void init(scala.collection.Iterator inputs[]) {
/* 026 */ agg_initAgg = false;
/* 027 */
/* 028 */ inputadapter_input = inputs[0];
/* 029 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 030 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 031 */ agg_result = new UnsafeRow(1);
/* 032 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 033 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 034 */ }
/* 035 */
/* 036 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 037 */ // initialize aggregation buffer
/* 038 */ agg_bufIsNull = false;
/* 039 */ agg_bufValue = 0L;
/* 040 */
/* 041 */ /*** PRODUCE: INPUT */
/* 042 */
/* 043 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 044 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 045 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#84L]) */
/* 046 */ /* input[0, bigint] */
/* 047 */ long agg_value3 = inputadapter_row.getLong(0);
/* 048 */
/* 049 */ // do aggregate
/* 050 */ /* (input[0, bigint] + input[1, bigint]) */
// NOTE(review): the generated expression comment above mentions input[1],
// but the code sums the running buffer with input column 0 — the buffer
// plays the role of the merge expression's first operand.
/* 051 */ long agg_value4 = -1L;
/* 052 */ agg_value4 = agg_bufValue + agg_value3;
/* 053 */ // update aggregation buffer
/* 054 */ agg_bufIsNull = false;
/* 055 */ agg_bufValue = agg_value4;
/* 056 */ }
/* 057 */
/* 058 */ }
/* 059 */
// Runs the merge on first call; the while-loop body executes at most once
// (agg_initAgg flips to true immediately), emitting the final count row.
/* 060 */ protected void processNext() throws java.io.IOException {
/* 061 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#84L]) */
/* 062 */
/* 063 */ while (!agg_initAgg) {
/* 064 */ agg_initAgg = true;
/* 065 */ agg_doAggregateWithoutKey();
/* 066 */
/* 067 */ // output the result
/* 068 */
/* 069 */ agg_metricValue.add(1);
/* 070 */ agg_rowWriter.zeroOutNullBytes();
/* 071 */
/* 072 */ if (agg_bufIsNull) {
/* 073 */ agg_rowWriter.setNullAt(0);
/* 074 */ } else {
/* 075 */ agg_rowWriter.write(0, agg_bufValue);
/* 076 */ }
/* 077 */ append(agg_result);
/* 078 */ }
/* 079 */ }
/* 080 */ }
// NOTE(review): Spark Catalyst whole-stage codegen output (compiled by Janino
// at runtime). The /* NNN */ prefixes are the generator's own line markers.
// Pipeline: Partial count(1) over Range(0, 20971520) — identical in structure
// to the earlier Partial pipelines, with fresh expression IDs.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#89L])
/* 007 */ +- Project
/* 008 */ +- Range 0, 1, 1, 20971520, [id#80L]
/* 009 */ */
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private boolean agg_bufIsNull;
/* 014 */ private long agg_bufValue;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 017 */ private boolean range_initRange;
/* 018 */ private long range_partitionEnd;
/* 019 */ private long range_number;
/* 020 */ private boolean range_overflow;
/* 021 */ private scala.collection.Iterator range_input;
/* 022 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */
/* 028 */ public GeneratedIterator(Object[] references) {
/* 029 */ this.references = references;
/* 030 */ }
/* 031 */
/* 032 */ public void init(scala.collection.Iterator inputs[]) {
/* 033 */ agg_initAgg = false;
/* 034 */
/* 035 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 036 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 037 */ range_initRange = false;
/* 038 */ range_partitionEnd = 0L;
/* 039 */ range_number = 0L;
/* 040 */ range_overflow = false;
/* 041 */ range_input = inputs[0];
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ agg_result = new UnsafeRow(1);
/* 045 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 046 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 047 */ }
/* 048 */
/* 049 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 050 */ // initialize aggregation buffer
/* 051 */ agg_bufIsNull = false;
/* 052 */ agg_bufValue = 0L;
/* 053 */
/* 054 */ /*** PRODUCE: Project */
/* 055 */
/* 056 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#80L] */
/* 057 */
/* 058 */ // initialize Range
/* 059 */ if (!range_initRange) {
/* 060 */ range_initRange = true;
// The single input row carries this task's partition index; it sizes the
// [range_number, range_partitionEnd) slice computed in initRange below.
/* 061 */ if (range_input.hasNext()) {
/* 062 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 063 */ } else {
/* 064 */ return;
/* 065 */ }
/* 066 */ }
/* 067 */
/* 068 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 069 */ long range_value = range_number;
/* 070 */ range_number += 1L;
// Precedence note: '<' binds tighter than '^', so this reads
// (range_number < range_value) ^ (1L < 0). With step 1L the right operand
// is false, i.e. overflow is flagged exactly when the increment wraps.
/* 071 */ if (range_number < range_value ^ 1L < 0) {
/* 072 */ range_overflow = true;
/* 073 */ }
/* 074 */
/* 075 */ /*** CONSUME: Project */
/* 076 */
/* 077 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#89L]) */
/* 078 */
/* 079 */ // do aggregate
/* 080 */ /* (input[0, bigint] + 1) */
/* 081 */ long agg_value1 = -1L;
/* 082 */ agg_value1 = agg_bufValue + 1L;
/* 083 */ // update aggregation buffer
/* 084 */ agg_bufIsNull = false;
/* 085 */ agg_bufValue = agg_value1;
/* 086 */
/* 087 */ }
/* 088 */
/* 089 */ }
/* 090 */
// BigInteger arithmetic avoids 64-bit overflow while computing this slice's
// start/end; the results are clamped to the long range before use.
/* 091 */ private void initRange(int idx) {
/* 092 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 093 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 094 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 095 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 096 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 097 */
/* 098 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 099 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 100 */ range_number = Long.MAX_VALUE;
/* 101 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 102 */ range_number = Long.MIN_VALUE;
/* 103 */ } else {
/* 104 */ range_number = st.longValue();
/* 105 */ }
/* 106 */
/* 107 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 108 */ .multiply(step).add(start);
/* 109 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 110 */ range_partitionEnd = Long.MAX_VALUE;
/* 111 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 112 */ range_partitionEnd = Long.MIN_VALUE;
/* 113 */ } else {
/* 114 */ range_partitionEnd = end.longValue();
/* 115 */ }
/* 116 */
// Records the slice's row count up front (division by the 1L step).
/* 117 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 118 */ }
/* 119 */
// Runs the whole aggregation on first call; the while-loop body executes at
// most once (agg_initAgg flips to true immediately), emitting one row.
/* 120 */ protected void processNext() throws java.io.IOException {
/* 121 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#89L]) */
/* 122 */
/* 123 */ while (!agg_initAgg) {
/* 124 */ agg_initAgg = true;
/* 125 */ agg_doAggregateWithoutKey();
/* 126 */
/* 127 */ // output the result
/* 128 */
/* 129 */ agg_metricValue.add(1);
/* 130 */ agg_rowWriter.zeroOutNullBytes();
/* 131 */
/* 132 */ if (agg_bufIsNull) {
/* 133 */ agg_rowWriter.setNullAt(0);
/* 134 */ } else {
/* 135 */ agg_rowWriter.write(0, agg_bufValue);
/* 136 */ }
/* 137 */ append(agg_result);
/* 138 */ }
/* 139 */ }
/* 140 */ }
// NOTE(review): Spark Catalyst whole-stage codegen output (compiled by Janino
// at runtime). Pipeline: Final count(1) — merges partial counts arriving from
// the shuffle by summing input column 0 into a single output row.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#94L])
/* 007 */ +- INPUT
/* 008 */ */
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private boolean agg_bufIsNull;
/* 013 */ private long agg_bufValue;
/* 014 */ private scala.collection.Iterator inputadapter_input;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */
/* 021 */ public GeneratedIterator(Object[] references) {
/* 022 */ this.references = references;
/* 023 */ }
/* 024 */
/* 025 */ public void init(scala.collection.Iterator inputs[]) {
/* 026 */ agg_initAgg = false;
/* 027 */
/* 028 */ inputadapter_input = inputs[0];
/* 029 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 030 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 031 */ agg_result = new UnsafeRow(1);
/* 032 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 033 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 034 */ }
/* 035 */
/* 036 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 037 */ // initialize aggregation buffer
/* 038 */ agg_bufIsNull = false;
/* 039 */ agg_bufValue = 0L;
/* 040 */
/* 041 */ /*** PRODUCE: INPUT */
/* 042 */
/* 043 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 044 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 045 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#94L]) */
/* 046 */ /* input[0, bigint] */
/* 047 */ long agg_value3 = inputadapter_row.getLong(0);
/* 048 */
/* 049 */ // do aggregate
/* 050 */ /* (input[0, bigint] + input[1, bigint]) */
// NOTE(review): the generated expression comment above mentions input[1],
// but the code sums the running buffer with input column 0 — the buffer
// plays the role of the merge expression's first operand.
/* 051 */ long agg_value4 = -1L;
/* 052 */ agg_value4 = agg_bufValue + agg_value3;
/* 053 */ // update aggregation buffer
/* 054 */ agg_bufIsNull = false;
/* 055 */ agg_bufValue = agg_value4;
/* 056 */ }
/* 057 */
/* 058 */ }
/* 059 */
// Runs the merge on first call; the while-loop body executes at most once
// (agg_initAgg flips to true immediately), emitting the final count row.
/* 060 */ protected void processNext() throws java.io.IOException {
/* 061 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#94L]) */
/* 062 */
/* 063 */ while (!agg_initAgg) {
/* 064 */ agg_initAgg = true;
/* 065 */ agg_doAggregateWithoutKey();
/* 066 */
/* 067 */ // output the result
/* 068 */
/* 069 */ agg_metricValue.add(1);
/* 070 */ agg_rowWriter.zeroOutNullBytes();
/* 071 */
/* 072 */ if (agg_bufIsNull) {
/* 073 */ agg_rowWriter.setNullAt(0);
/* 074 */ } else {
/* 075 */ agg_rowWriter.write(0, agg_bufValue);
/* 076 */ }
/* 077 */ append(agg_result);
/* 078 */ }
/* 079 */ }
/* 080 */ }
// NOTE(review): entry point of a seventh generated pipeline; the class body
// that follows is truncated in this capture.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#99L])
/* 007 */ +- Project
/* 008 */ +- Range 0, 1, 1, 20971520, [id#90L]
/* 009 */ */
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private boolean agg_bufIsNull;
/* 014 */ private long agg_bufValue;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 017 */ private boolean range_initRange;
/* 018 */ private long range_partitionEnd;
/* 019 */ private long range_number;
/* 020 */ private boolean range_overflow;
/* 021 */ private scala.collection.Iterator range_input;
/* 022 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */
/* 028 */ public GeneratedIterator(Object[] references) {
/* 029 */ this.references = references;
/* 030 */ }
/* 031 */
/* 032 */ public void init(scala.collection.Iterator inputs[]) {
/* 033 */ agg_initAgg = false;
/* 034 */
/* 035 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 036 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 037 */ range_initRange = false;
/* 038 */ range_partitionEnd = 0L;
/* 039 */ range_number = 0L;
/* 040 */ range_overflow = false;
/* 041 */ range_input = inputs[0];
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ agg_result = new UnsafeRow(1);
/* 045 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 046 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 047 */ }
/* 048 */
// NOTE(review): fused loop for count(1) with no grouping keys: walks the Range
// partition [range_number, range_partitionEnd) and bumps the long aggregation
// buffer by 1 per row — no row objects are materialized for Range/Project.
/* 049 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 050 */ // initialize aggregation buffer
/* 051 */ agg_bufIsNull = false;
/* 052 */ agg_bufValue = 0L;
/* 053 */
/* 054 */ /*** PRODUCE: Project */
/* 055 */
/* 056 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#90L] */
/* 057 */
/* 058 */ // initialize Range
/* 059 */ if (!range_initRange) {
/* 060 */ range_initRange = true;
// NOTE(review): the single input row carries this task's partition index,
// which initRange turns into the [start, end) slice for this partition.
/* 061 */ if (range_input.hasNext()) {
/* 062 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 063 */ } else {
/* 064 */ return;
/* 065 */ }
/* 066 */ }
/* 067 */
/* 068 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 069 */ long range_value = range_number;
/* 070 */ range_number += 1L;
// NOTE(review): overflow guard generated for step = 1: "1L < 0" is the constant
// sign-of-step test, so the XOR flags wrap-around of range_number past Long.MAX_VALUE.
/* 071 */ if (range_number < range_value ^ 1L < 0) {
/* 072 */ range_overflow = true;
/* 073 */ }
/* 074 */
/* 075 */ /*** CONSUME: Project */
/* 076 */
/* 077 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#99L]) */
/* 078 */
/* 079 */ // do aggregate
/* 080 */ /* (input[0, bigint] + 1) */
/* 081 */ long agg_value1 = -1L;
/* 082 */ agg_value1 = agg_bufValue + 1L;
/* 083 */ // update aggregation buffer
/* 084 */ agg_bufIsNull = false;
/* 085 */ agg_bufValue = agg_value1;
/* 086 */
/* 087 */ }
/* 088 */
/* 089 */ }
/* 090 */
// NOTE(review): computes this partition's half-open slice of the global range
// 0..20971520 (step 1, 1 slice) from the partition index, using BigInteger so
// the intermediate products cannot overflow, then clamps the endpoints into
// long range. Also credits the Range numOutputRows metric with the slice size.
/* 091 */ private void initRange(int idx) {
/* 092 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 093 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 094 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 095 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 096 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 097 */
/* 098 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 099 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 100 */ range_number = Long.MAX_VALUE;
/* 101 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 102 */ range_number = Long.MIN_VALUE;
/* 103 */ } else {
/* 104 */ range_number = st.longValue();
/* 105 */ }
/* 106 */
/* 107 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 108 */ .multiply(step).add(start);
/* 109 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 110 */ range_partitionEnd = Long.MAX_VALUE;
/* 111 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 112 */ range_partitionEnd = Long.MIN_VALUE;
/* 113 */ } else {
/* 114 */ range_partitionEnd = end.longValue();
/* 115 */ }
/* 116 */
// NOTE(review): division by 1L is the generated form of "/ step".
/* 117 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 118 */ }
/* 119 */
// NOTE(review): BufferedRowIterator hook — runs the whole-partition aggregation
// exactly once (guarded by agg_initAgg) and appends the single partial-count
// row to the output buffer.
/* 120 */ protected void processNext() throws java.io.IOException {
/* 121 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#99L]) */
/* 122 */
/* 123 */ while (!agg_initAgg) {
/* 124 */ agg_initAgg = true;
/* 125 */ agg_doAggregateWithoutKey();
/* 126 */
/* 127 */ // output the result
/* 128 */
/* 129 */ agg_metricValue.add(1);
/* 130 */ agg_rowWriter.zeroOutNullBytes();
/* 131 */
/* 132 */ if (agg_bufIsNull) {
/* 133 */ agg_rowWriter.setNullAt(0);
/* 134 */ } else {
/* 135 */ agg_rowWriter.write(0, agg_bufValue);
/* 136 */ }
/* 137 */ append(agg_result);
/* 138 */ }
/* 139 */ }
/* 140 */ }
// NOTE(review): codegen entry point — Spark compiles this source with Janino and
// invokes generate(references) to obtain the iterator instance for the task.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#104L])
/* 007 */ +- INPUT
/* 008 */ */
// NOTE(review): final-mode count(1) pipeline — reads the partial counts (one
// long per input row) produced by the upstream exchange via inputadapter_input,
// sums them into agg_bufValue, and emits exactly one UnsafeRow.
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private boolean agg_bufIsNull;
/* 013 */ private long agg_bufValue;
/* 014 */ private scala.collection.Iterator inputadapter_input;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */
/* 021 */ public GeneratedIterator(Object[] references) {
/* 022 */ this.references = references;
/* 023 */ }
/* 024 */
// NOTE(review): references[0] is the aggregate's numOutputRows SQLMetric.
/* 025 */ public void init(scala.collection.Iterator inputs[]) {
/* 026 */ agg_initAgg = false;
/* 027 */
/* 028 */ inputadapter_input = inputs[0];
/* 029 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 030 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 031 */ agg_result = new UnsafeRow(1);
/* 032 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 033 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 034 */ }
/* 035 */
// NOTE(review): merge step of count: buffer += partial count from each row.
/* 036 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 037 */ // initialize aggregation buffer
/* 038 */ agg_bufIsNull = false;
/* 039 */ agg_bufValue = 0L;
/* 040 */
/* 041 */ /*** PRODUCE: INPUT */
/* 042 */
/* 043 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 044 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 045 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#104L]) */
/* 046 */ /* input[0, bigint] */
/* 047 */ long agg_value3 = inputadapter_row.getLong(0);
/* 048 */
/* 049 */ // do aggregate
/* 050 */ /* (input[0, bigint] + input[1, bigint]) */
/* 051 */ long agg_value4 = -1L;
/* 052 */ agg_value4 = agg_bufValue + agg_value3;
/* 053 */ // update aggregation buffer
/* 054 */ agg_bufIsNull = false;
/* 055 */ agg_bufValue = agg_value4;
/* 056 */ }
/* 057 */
/* 058 */ }
/* 059 */
// NOTE(review): runs the aggregation once, then appends the single result row.
/* 060 */ protected void processNext() throws java.io.IOException {
/* 061 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#104L]) */
/* 062 */
/* 063 */ while (!agg_initAgg) {
/* 064 */ agg_initAgg = true;
/* 065 */ agg_doAggregateWithoutKey();
/* 066 */
/* 067 */ // output the result
/* 068 */
/* 069 */ agg_metricValue.add(1);
/* 070 */ agg_rowWriter.zeroOutNullBytes();
/* 071 */
/* 072 */ if (agg_bufIsNull) {
/* 073 */ agg_rowWriter.setNullAt(0);
/* 074 */ } else {
/* 075 */ agg_rowWriter.write(0, agg_bufValue);
/* 076 */ }
/* 077 */ append(agg_result);
/* 078 */ }
/* 079 */ }
/* 080 */ }
// NOTE(review): codegen entry point — Spark compiles this source with Janino and
// invokes generate(references) to obtain the iterator instance for the task.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#109L])
/* 007 */ +- Project
/* 008 */ +- Range 0, 1, 1, 20971520, [id#100L]
/* 009 */ */
// NOTE(review): partial-mode count(1) pipeline fused over
// Range 0..20971520 -> Project: counts rows of the Range slice without
// materializing them and emits one partial-count UnsafeRow per partition.
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private boolean agg_bufIsNull;
/* 014 */ private long agg_bufValue;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 017 */ private boolean range_initRange;
/* 018 */ private long range_partitionEnd;
/* 019 */ private long range_number;
/* 020 */ private boolean range_overflow;
/* 021 */ private scala.collection.Iterator range_input;
/* 022 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */
/* 028 */ public GeneratedIterator(Object[] references) {
/* 029 */ this.references = references;
/* 030 */ }
/* 031 */
// NOTE(review): references[0] = Range numOutputRows metric,
// references[1] = aggregate numOutputRows metric.
/* 032 */ public void init(scala.collection.Iterator inputs[]) {
/* 033 */ agg_initAgg = false;
/* 034 */
/* 035 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 036 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 037 */ range_initRange = false;
/* 038 */ range_partitionEnd = 0L;
/* 039 */ range_number = 0L;
/* 040 */ range_overflow = false;
/* 041 */ range_input = inputs[0];
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ agg_result = new UnsafeRow(1);
/* 045 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 046 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 047 */ }
/* 048 */
// NOTE(review): fused loop — increments the long count buffer once per Range value.
/* 049 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 050 */ // initialize aggregation buffer
/* 051 */ agg_bufIsNull = false;
/* 052 */ agg_bufValue = 0L;
/* 053 */
/* 054 */ /*** PRODUCE: Project */
/* 055 */
/* 056 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#100L] */
/* 057 */
/* 058 */ // initialize Range
/* 059 */ if (!range_initRange) {
/* 060 */ range_initRange = true;
/* 061 */ if (range_input.hasNext()) {
/* 062 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 063 */ } else {
/* 064 */ return;
/* 065 */ }
/* 066 */ }
/* 067 */
/* 068 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 069 */ long range_value = range_number;
/* 070 */ range_number += 1L;
// NOTE(review): overflow guard for step = 1 ("1L < 0" is the constant sign test).
/* 071 */ if (range_number < range_value ^ 1L < 0) {
/* 072 */ range_overflow = true;
/* 073 */ }
/* 074 */
/* 075 */ /*** CONSUME: Project */
/* 076 */
/* 077 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#109L]) */
/* 078 */
/* 079 */ // do aggregate
/* 080 */ /* (input[0, bigint] + 1) */
/* 081 */ long agg_value1 = -1L;
/* 082 */ agg_value1 = agg_bufValue + 1L;
/* 083 */ // update aggregation buffer
/* 084 */ agg_bufIsNull = false;
/* 085 */ agg_bufValue = agg_value1;
/* 086 */
/* 087 */ }
/* 088 */
/* 089 */ }
/* 090 */
// NOTE(review): derives this partition's [start, end) slice of the global range
// via BigInteger (overflow-safe), clamps into long range, and updates the metric.
/* 091 */ private void initRange(int idx) {
/* 092 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 093 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 094 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 095 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 096 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 097 */
/* 098 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 099 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 100 */ range_number = Long.MAX_VALUE;
/* 101 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 102 */ range_number = Long.MIN_VALUE;
/* 103 */ } else {
/* 104 */ range_number = st.longValue();
/* 105 */ }
/* 106 */
/* 107 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 108 */ .multiply(step).add(start);
/* 109 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 110 */ range_partitionEnd = Long.MAX_VALUE;
/* 111 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 112 */ range_partitionEnd = Long.MIN_VALUE;
/* 113 */ } else {
/* 114 */ range_partitionEnd = end.longValue();
/* 115 */ }
/* 116 */
/* 117 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 118 */ }
/* 119 */
// NOTE(review): runs the aggregation once, then appends the single partial-count row.
/* 120 */ protected void processNext() throws java.io.IOException {
/* 121 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#109L]) */
/* 122 */
/* 123 */ while (!agg_initAgg) {
/* 124 */ agg_initAgg = true;
/* 125 */ agg_doAggregateWithoutKey();
/* 126 */
/* 127 */ // output the result
/* 128 */
/* 129 */ agg_metricValue.add(1);
/* 130 */ agg_rowWriter.zeroOutNullBytes();
/* 131 */
/* 132 */ if (agg_bufIsNull) {
/* 133 */ agg_rowWriter.setNullAt(0);
/* 134 */ } else {
/* 135 */ agg_rowWriter.write(0, agg_bufValue);
/* 136 */ }
/* 137 */ append(agg_result);
/* 138 */ }
/* 139 */ }
/* 140 */ }
// NOTE(review): codegen entry point — Spark compiles this source with Janino and
// invokes generate(references) to obtain the iterator instance for the task.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#114L])
/* 007 */ +- INPUT
/* 008 */ */
// NOTE(review): final-mode count(1) pipeline — reads the partial counts (one
// long per input row) produced by the upstream exchange via inputadapter_input,
// sums them into agg_bufValue, and emits exactly one UnsafeRow.
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private boolean agg_bufIsNull;
/* 013 */ private long agg_bufValue;
/* 014 */ private scala.collection.Iterator inputadapter_input;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */
/* 021 */ public GeneratedIterator(Object[] references) {
/* 022 */ this.references = references;
/* 023 */ }
/* 024 */
// NOTE(review): references[0] is the aggregate's numOutputRows SQLMetric.
/* 025 */ public void init(scala.collection.Iterator inputs[]) {
/* 026 */ agg_initAgg = false;
/* 027 */
/* 028 */ inputadapter_input = inputs[0];
/* 029 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 030 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 031 */ agg_result = new UnsafeRow(1);
/* 032 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 033 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 034 */ }
/* 035 */
// NOTE(review): merge step of count: buffer += partial count from each row.
/* 036 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 037 */ // initialize aggregation buffer
/* 038 */ agg_bufIsNull = false;
/* 039 */ agg_bufValue = 0L;
/* 040 */
/* 041 */ /*** PRODUCE: INPUT */
/* 042 */
/* 043 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 044 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 045 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#114L]) */
/* 046 */ /* input[0, bigint] */
/* 047 */ long agg_value3 = inputadapter_row.getLong(0);
/* 048 */
/* 049 */ // do aggregate
/* 050 */ /* (input[0, bigint] + input[1, bigint]) */
/* 051 */ long agg_value4 = -1L;
/* 052 */ agg_value4 = agg_bufValue + agg_value3;
/* 053 */ // update aggregation buffer
/* 054 */ agg_bufIsNull = false;
/* 055 */ agg_bufValue = agg_value4;
/* 056 */ }
/* 057 */
/* 058 */ }
/* 059 */
// NOTE(review): runs the aggregation once, then appends the single result row.
/* 060 */ protected void processNext() throws java.io.IOException {
/* 061 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#114L]) */
/* 062 */
/* 063 */ while (!agg_initAgg) {
/* 064 */ agg_initAgg = true;
/* 065 */ agg_doAggregateWithoutKey();
/* 066 */
/* 067 */ // output the result
/* 068 */
/* 069 */ agg_metricValue.add(1);
/* 070 */ agg_rowWriter.zeroOutNullBytes();
/* 071 */
/* 072 */ if (agg_bufIsNull) {
/* 073 */ agg_rowWriter.setNullAt(0);
/* 074 */ } else {
/* 075 */ agg_rowWriter.write(0, agg_bufValue);
/* 076 */ }
/* 077 */ append(agg_result);
/* 078 */ }
/* 079 */ }
/* 080 */ }
// NOTE(review): codegen entry point — Spark compiles this source with Janino and
// invokes generate(references) to obtain the iterator instance for the task.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#119L])
/* 007 */ +- Project
/* 008 */ +- Range 0, 1, 1, 20971520, [id#110L]
/* 009 */ */
// NOTE(review): partial-mode count(1) pipeline fused over
// Range 0..20971520 -> Project: counts rows of the Range slice without
// materializing them and emits one partial-count UnsafeRow per partition.
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private boolean agg_bufIsNull;
/* 014 */ private long agg_bufValue;
/* 015 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 016 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 017 */ private boolean range_initRange;
/* 018 */ private long range_partitionEnd;
/* 019 */ private long range_number;
/* 020 */ private boolean range_overflow;
/* 021 */ private scala.collection.Iterator range_input;
/* 022 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */
/* 028 */ public GeneratedIterator(Object[] references) {
/* 029 */ this.references = references;
/* 030 */ }
/* 031 */
// NOTE(review): references[0] = Range numOutputRows metric,
// references[1] = aggregate numOutputRows metric.
/* 032 */ public void init(scala.collection.Iterator inputs[]) {
/* 033 */ agg_initAgg = false;
/* 034 */
/* 035 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[0];
/* 036 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 037 */ range_initRange = false;
/* 038 */ range_partitionEnd = 0L;
/* 039 */ range_number = 0L;
/* 040 */ range_overflow = false;
/* 041 */ range_input = inputs[0];
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ agg_result = new UnsafeRow(1);
/* 045 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 046 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 047 */ }
/* 048 */
// NOTE(review): fused loop — increments the long count buffer once per Range value.
/* 049 */ private void agg_doAggregateWithoutKey() throws java.io.IOException {
/* 050 */ // initialize aggregation buffer
/* 051 */ agg_bufIsNull = false;
/* 052 */ agg_bufValue = 0L;
/* 053 */
/* 054 */ /*** PRODUCE: Project */
/* 055 */
/* 056 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#110L] */
/* 057 */
/* 058 */ // initialize Range
/* 059 */ if (!range_initRange) {
/* 060 */ range_initRange = true;
/* 061 */ if (range_input.hasNext()) {
/* 062 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 063 */ } else {
/* 064 */ return;
/* 065 */ }
/* 066 */ }
/* 067 */
/* 068 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 069 */ long range_value = range_number;
/* 070 */ range_number += 1L;
// NOTE(review): overflow guard for step = 1 ("1L < 0" is the constant sign test).
/* 071 */ if (range_number < range_value ^ 1L < 0) {
/* 072 */ range_overflow = true;
/* 073 */ }
/* 074 */
/* 075 */ /*** CONSUME: Project */
/* 076 */
/* 077 */ /*** CONSUME: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#119L]) */
/* 078 */
/* 079 */ // do aggregate
/* 080 */ /* (input[0, bigint] + 1) */
/* 081 */ long agg_value1 = -1L;
/* 082 */ agg_value1 = agg_bufValue + 1L;
/* 083 */ // update aggregation buffer
/* 084 */ agg_bufIsNull = false;
/* 085 */ agg_bufValue = agg_value1;
/* 086 */
/* 087 */ }
/* 088 */
/* 089 */ }
/* 090 */
// NOTE(review): derives this partition's [start, end) slice of the global range
// via BigInteger (overflow-safe), clamps into long range, and updates the metric.
/* 091 */ private void initRange(int idx) {
/* 092 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 093 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 094 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 095 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 096 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 097 */
/* 098 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 099 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 100 */ range_number = Long.MAX_VALUE;
/* 101 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 102 */ range_number = Long.MIN_VALUE;
/* 103 */ } else {
/* 104 */ range_number = st.longValue();
/* 105 */ }
/* 106 */
/* 107 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 108 */ .multiply(step).add(start);
/* 109 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 110 */ range_partitionEnd = Long.MAX_VALUE;
/* 111 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 112 */ range_partitionEnd = Long.MIN_VALUE;
/* 113 */ } else {
/* 114 */ range_partitionEnd = end.longValue();
/* 115 */ }
/* 116 */
/* 117 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 118 */ }
/* 119 */
// NOTE(review): runs the aggregation once, then appends the single partial-count row.
/* 120 */ protected void processNext() throws java.io.IOException {
/* 121 */ /*** PRODUCE: TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#119L]) */
/* 122 */
/* 123 */ while (!agg_initAgg) {
/* 124 */ agg_initAgg = true;
/* 125 */ agg_doAggregateWithoutKey();
/* 126 */
/* 127 */ // output the result
/* 128 */
/* 129 */ agg_metricValue.add(1);
/* 130 */ agg_rowWriter.zeroOutNullBytes();
/* 131 */
/* 132 */ if (agg_bufIsNull) {
/* 133 */ agg_rowWriter.setNullAt(0);
/* 134 */ } else {
/* 135 */ agg_rowWriter.write(0, agg_bufValue);
/* 136 */ }
/* 137 */ append(agg_result);
/* 138 */ }
/* 139 */ }
/* 140 */ }
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
count: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
count codegen=false 632 / 701 33.2 30.1 1.0X
count codegen=true 106 / 166 197.7 5.1 6.0X
Running benchmark: Aggregate w keys
Running case: Aggregate w keys codegen=false
Running case: Aggregate w keys codegen=true
// NOTE(review): codegen entry point — Spark compiles this source with Janino and
// invokes generate(references) to obtain the iterator instance for the task.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#195L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#195L,count#198L])
/* 007 */ +- INPUT
/* 008 */ */
// NOTE(review): final-mode hash aggregation with a grouping key (k#195L):
// probes an UnsafeFixedWidthAggregationMap per input row, spills to an
// UnsafeKVExternalSorter when the map cannot allocate a buffer, then streams
// the (key, count) pairs out of agg_mapIter in processNext().
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 013 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 014 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 015 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 016 */ private scala.collection.Iterator inputadapter_input;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */ private UnsafeRow agg_result1;
/* 021 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder1;
/* 022 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter1;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 024 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 025 */
/* 026 */ public GeneratedIterator(Object[] references) {
/* 027 */ this.references = references;
/* 028 */ }
/* 029 */
// NOTE(review): references[0] = the TungstenAggregate plan node (used to build
// the hash map and finish the aggregation), references[1] = numOutputRows metric.
// agg_result is the 1-column grouping-key row; agg_result1 the 2-column output row.
/* 030 */ public void init(scala.collection.Iterator inputs[]) {
/* 031 */ agg_initAgg = false;
/* 032 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 033 */ agg_hashMap = agg_plan.createHashMap();
/* 034 */
/* 035 */ inputadapter_input = inputs[0];
/* 036 */ agg_result = new UnsafeRow(1);
/* 037 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 038 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 039 */ agg_result1 = new UnsafeRow(2);
/* 040 */ this.agg_holder1 = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result1, 0);
/* 041 */ this.agg_rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder1, 2);
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ }
/* 045 */
// NOTE(review): per-row path: build the key row, hash it with Murmur3, look up
// (or allocate) the aggregation buffer, and add the row's partial count into it.
/* 046 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 047 */ /*** PRODUCE: INPUT */
/* 048 */
/* 049 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 050 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 051 */ /*** CONSUME: TungstenAggregate(key=[k#195L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#195L,count#198L]) */
/* 052 */ /* input[0, bigint] */
/* 053 */ long agg_value = inputadapter_row.getLong(0);
/* 054 */ /* input[1, bigint] */
/* 055 */ long agg_value1 = inputadapter_row.getLong(1);
/* 056 */
/* 057 */ // generate grouping key
/* 058 */ agg_rowWriter.write(0, agg_value);
/* 059 */ /* hash(input[0, bigint], 42) */
/* 060 */ int agg_value3 = 42;
/* 061 */
/* 062 */ agg_value3 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(agg_value, agg_value3);
/* 063 */ UnsafeRow agg_aggBuffer = null;
/* 064 */ if (true) {
/* 065 */ // try to get the buffer from hash map
/* 066 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 067 */ }
// NOTE(review): null buffer means the map is out of memory — destructively spill
// it into the external sorter, then retry the allocation once.
/* 068 */ if (agg_aggBuffer == null) {
/* 069 */ if (agg_sorter == null) {
/* 070 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 071 */ } else {
/* 072 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 073 */ }
/* 074 */
/* 075 */ // the hash map had be spilled, it should have enough memory now,
/* 076 */ // try to allocate buffer again.
/* 077 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 078 */ if (agg_aggBuffer == null) {
/* 079 */ // failed to allocate the first page
/* 080 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 081 */ }
/* 082 */ }
/* 083 */
/* 084 */ // evaluate aggregate function
/* 085 */ /* (input[0, bigint] + input[2, bigint]) */
/* 086 */ /* input[0, bigint] */
/* 087 */ long agg_value6 = agg_aggBuffer.getLong(0);
/* 088 */
/* 089 */ long agg_value5 = -1L;
/* 090 */ agg_value5 = agg_value6 + agg_value1;
/* 091 */ // update aggregate buffer
/* 092 */ agg_aggBuffer.setLong(0, agg_value5);
/* 093 */ }
/* 094 */
/* 095 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 096 */ }
/* 097 */
// NOTE(review): drains agg_mapIter into 2-column (key, count) output rows; frees
// the map only when nothing was spilled (the sorter owns the pages otherwise).
/* 098 */ protected void processNext() throws java.io.IOException {
/* 099 */ /*** PRODUCE: TungstenAggregate(key=[k#195L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#195L,count#198L]) */
/* 100 */
/* 101 */ if (!agg_initAgg) {
/* 102 */ agg_initAgg = true;
/* 103 */ agg_doAggregateWithKeys();
/* 104 */ }
/* 105 */
/* 106 */ // output the result
/* 107 */ while (agg_mapIter.next()) {
/* 108 */ agg_metricValue.add(1);
/* 109 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 110 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 111 */
/* 112 */ /* input[0, bigint] */
/* 113 */ long agg_value8 = agg_aggKey.getLong(0);
/* 114 */ /* input[0, bigint] */
/* 115 */ long agg_value9 = agg_aggBuffer1.getLong(0);
/* 116 */
/* 117 */ agg_rowWriter1.write(0, agg_value8);
/* 118 */
/* 119 */ agg_rowWriter1.write(1, agg_value9);
/* 120 */ append(agg_result1);
/* 121 */
/* 122 */ if (shouldStop()) return;
/* 123 */ }
/* 124 */
/* 125 */ agg_mapIter.close();
/* 126 */ if (agg_sorter == null) {
/* 127 */ agg_hashMap.free();
/* 128 */ }
/* 129 */ }
/* 130 */ }
// NOTE(review): codegen entry point — Spark compiles this source with Janino and
// invokes generate(references) to obtain the iterator instance for the task.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#195L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#195L,count#203L])
/* 007 */ +- Project [(id#192L & 2) AS k#195L]
/* 008 */ +- Range 0, 1, 1, 20971520, [id#192L]
/* 009 */ */
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 014 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 015 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 016 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 017 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 018 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 019 */ private boolean range_initRange;
/* 020 */ private long range_partitionEnd;
/* 021 */ private long range_number;
/* 022 */ private boolean range_overflow;
/* 023 */ private scala.collection.Iterator range_input;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowJoiner agg_unsafeRowJoiner;
/* 028 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 029 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 030 */
// NOTE(review): generated constructor — only stashes the references array
// (plan node + SQLMetrics) supplied by the codegen driver; setup happens in init().
/* 031 */ public GeneratedIterator(Object[] references) {
/* 032 */ this.references = references;
/* 033 */ }
/* 034 */
// NOTE(review): per-partition setup for the partial keyed-count pipeline:
// references[0] = TungstenAggregate plan node (builds the hash map and the
// key/value UnsafeRowJoiner), references[1] = Range numOutputRows metric,
// references[2] = aggregate numOutputRows metric; also resets the Range cursor
// and pre-allocates the 1-column grouping-key UnsafeRow.
/* 035 */ public void init(scala.collection.Iterator inputs[]) {
/* 036 */ agg_initAgg = false;
/* 037 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 038 */ agg_hashMap = agg_plan.createHashMap();
/* 039 */
/* 040 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 041 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 042 */ range_initRange = false;
/* 043 */ range_partitionEnd = 0L;
/* 044 */ range_number = 0L;
/* 045 */ range_overflow = false;
/* 046 */ range_input = inputs[0];
/* 047 */ agg_result = new UnsafeRow(1);
/* 048 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 049 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 050 */ agg_unsafeRowJoiner = agg_plan.createUnsafeJoiner();
/* 051 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[2];
/* 052 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 053 */ }
/* 054 */
// Fused Range -> Project -> partial hash aggregation. Generates ids with the
// inlined Range operator, projects the grouping key (id & 2), and counts rows
// per key in the unsafe hash map. When a buffer allocation fails, the map is
// spilled into an UnsafeKVExternalSorter and the probe is retried once.
// On completion agg_mapIter exposes the (key, buffer) pairs for processNext().
/* 055 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 056 */ /*** PRODUCE: Project [(id#192L & 2) AS k#195L] */
/* 057 */
/* 058 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#192L] */
/* 059 */
/* 060 */ // initialize Range
/* 061 */ if (!range_initRange) {
/* 062 */ range_initRange = true;
/* 063 */ if (range_input.hasNext()) {
// The upstream iterator supplies exactly one row: this task's partition index.
/* 064 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 065 */ } else {
/* 066 */ return;
/* 067 */ }
/* 068 */ }
/* 069 */
/* 070 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 071 */ long range_value = range_number;
/* 072 */ range_number += 1L;
// Precedence note: `<` binds tighter than `^`, so this reads
// (range_number < range_value) ^ (1L < 0) — with the positive step literal 1L
// the right side is false, i.e. overflow is flagged when the increment wrapped.
/* 073 */ if (range_number < range_value ^ 1L < 0) {
/* 074 */ range_overflow = true;
/* 075 */ }
/* 076 */
/* 077 */ /*** CONSUME: Project [(id#192L & 2) AS k#195L] */
/* 078 */
/* 079 */ /*** CONSUME: TungstenAggregate(key=[k#195L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#195L,count#203L]) */
/* 080 */ /* (input[0, bigint] & 2) */
/* 081 */ long project_value = -1L;
/* 082 */ project_value = range_value & 2L;
/* 083 */
/* 084 */ // generate grouping key
/* 085 */ agg_rowWriter.write(0, project_value);
/* 086 */ /* hash(input[0, bigint], 42) */
/* 087 */ int agg_value1 = 42;
/* 088 */
/* 089 */ agg_value1 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(project_value, agg_value1);
/* 090 */ UnsafeRow agg_aggBuffer = null;
/* 091 */ if (true) {
/* 092 */ // try to get the buffer from hash map
/* 093 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 094 */ }
/* 095 */ if (agg_aggBuffer == null) {
// Allocation failed: spill the map's current contents to the external sorter
// (merging with any earlier spill) to release memory, then retry the probe.
/* 096 */ if (agg_sorter == null) {
/* 097 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 098 */ } else {
/* 099 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 100 */ }
/* 101 */
/* 102 */ // the hash map had be spilled, it should have enough memory now,
/* 103 */ // try to allocate buffer again.
/* 104 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 105 */ if (agg_aggBuffer == null) {
/* 106 */ // failed to allocate the first page
/* 107 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 108 */ }
/* 109 */ }
/* 110 */
/* 111 */ // evaluate aggregate function
/* 112 */ /* (input[0, bigint] + 1) */
/* 113 */ /* input[0, bigint] */
/* 114 */ long agg_value4 = agg_aggBuffer.getLong(0);
/* 115 */
/* 116 */ long agg_value3 = -1L;
// Partial count(1): increment this key's running count stored in the buffer.
/* 117 */ agg_value3 = agg_value4 + 1L;
/* 118 */ // update aggregate buffer
/* 119 */ agg_aggBuffer.setLong(0, agg_value3);
/* 120 */
/* 121 */ }
/* 122 */
/* 123 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 124 */ }
/* 125 */
// Computes this task's half-open slice [range_number, range_partitionEnd) of
// Range(start=0, step=1, numSlices=1, numElements=20971520) from the partition
// index. BigInteger arithmetic avoids 64-bit overflow; the results are clamped
// into long range. Also credits the slice size to the numOutputRows metric
// up front (the `/ 1L` divisor is the generated step constant).
/* 126 */ private void initRange(int idx) {
/* 127 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 128 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 129 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 130 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 131 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 132 */
/* 133 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 134 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 135 */ range_number = Long.MAX_VALUE;
/* 136 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 137 */ range_number = Long.MIN_VALUE;
/* 138 */ } else {
/* 139 */ range_number = st.longValue();
/* 140 */ }
/* 141 */
/* 142 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 143 */ .multiply(step).add(start);
/* 144 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 145 */ range_partitionEnd = Long.MAX_VALUE;
/* 146 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 147 */ range_partitionEnd = Long.MIN_VALUE;
/* 148 */ } else {
/* 149 */ range_partitionEnd = end.longValue();
/* 150 */ }
/* 151 */
/* 152 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 153 */ }
/* 154 */
// BufferedRowIterator entry point. Runs the blocking aggregation exactly once
// (guarded by agg_initAgg), then streams (key, buffer) pairs out of the hash
// map iterator, joining them into full result rows via append(). shouldStop()
// returns control to the caller between batches; the loop resumes here on the
// next call because agg_mapIter keeps its position. Cleanup frees the hash map
// only when nothing was spilled (otherwise the sorter owns the pages).
/* 155 */ protected void processNext() throws java.io.IOException {
/* 156 */ /*** PRODUCE: TungstenAggregate(key=[k#195L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#195L,count#203L]) */
/* 157 */
/* 158 */ if (!agg_initAgg) {
/* 159 */ agg_initAgg = true;
/* 160 */ agg_doAggregateWithKeys();
/* 161 */ }
/* 162 */
/* 163 */ // output the result
/* 164 */ while (agg_mapIter.next()) {
/* 165 */ agg_metricValue.add(1);
/* 166 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 167 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 168 */
/* 169 */ UnsafeRow agg_resultRow = agg_unsafeRowJoiner.join(agg_aggKey, agg_aggBuffer1);
/* 170 */ append(agg_resultRow);
/* 171 */
/* 172 */ if (shouldStop()) return;
/* 173 */ }
/* 174 */
/* 175 */ agg_mapIter.close();
/* 176 */ if (agg_sorter == null) {
/* 177 */ agg_hashMap.free();
/* 178 */ }
/* 179 */ }
/* 180 */ }
// Codegen entry point compiled and invoked by Spark: wraps the pipeline's plan
// nodes and metrics (the `references` array) in a fresh GeneratedIterator.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#207L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#207L,count#210L])
/* 007 */ +- INPUT
/* 008 */ */
// Whole-stage-codegen pipeline: FINAL-mode TungstenAggregate reading shuffled
// (k#207L, partial count) rows from INPUT and merging counts per key.
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 013 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 014 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 015 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 016 */ private scala.collection.Iterator inputadapter_input;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */ private UnsafeRow agg_result1;
/* 021 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder1;
/* 022 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter1;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 024 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 025 */
/* 026 */ public GeneratedIterator(Object[] references) {
/* 027 */ this.references = references;
/* 028 */ }
/* 029 */
// references[] contract: [0] = TungstenAggregate plan node, [1] = numOutputRows
// metric. agg_result is the reused 1-field key row; agg_result1 the 2-field output.
/* 030 */ public void init(scala.collection.Iterator inputs[]) {
/* 031 */ agg_initAgg = false;
/* 032 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 033 */ agg_hashMap = agg_plan.createHashMap();
/* 034 */
/* 035 */ inputadapter_input = inputs[0];
/* 036 */ agg_result = new UnsafeRow(1);
/* 037 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 038 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 039 */ agg_result1 = new UnsafeRow(2);
/* 040 */ this.agg_holder1 = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result1, 0);
/* 041 */ this.agg_rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder1, 2);
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ }
/* 045 */
// Drains the upstream iterator, summing each row's partial count (column 1)
// into the hash-map buffer for its key (column 0). Spills to an external
// sorter and retries when a buffer allocation fails.
/* 046 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 047 */ /*** PRODUCE: INPUT */
/* 048 */
/* 049 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 050 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 051 */ /*** CONSUME: TungstenAggregate(key=[k#207L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#207L,count#210L]) */
/* 052 */ /* input[0, bigint] */
/* 053 */ long agg_value = inputadapter_row.getLong(0);
/* 054 */ /* input[1, bigint] */
/* 055 */ long agg_value1 = inputadapter_row.getLong(1);
/* 056 */
/* 057 */ // generate grouping key
/* 058 */ agg_rowWriter.write(0, agg_value);
/* 059 */ /* hash(input[0, bigint], 42) */
/* 060 */ int agg_value3 = 42;
/* 061 */
/* 062 */ agg_value3 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(agg_value, agg_value3);
/* 063 */ UnsafeRow agg_aggBuffer = null;
/* 064 */ if (true) {
/* 065 */ // try to get the buffer from hash map
/* 066 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 067 */ }
/* 068 */ if (agg_aggBuffer == null) {
// Allocation failed: spill current map contents to free memory, then retry.
/* 069 */ if (agg_sorter == null) {
/* 070 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 071 */ } else {
/* 072 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 073 */ }
/* 074 */
/* 075 */ // the hash map had be spilled, it should have enough memory now,
/* 076 */ // try to allocate buffer again.
/* 077 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 078 */ if (agg_aggBuffer == null) {
/* 079 */ // failed to allocate the first page
/* 080 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 081 */ }
/* 082 */ }
/* 083 */
/* 084 */ // evaluate aggregate function
/* 085 */ /* (input[0, bigint] + input[2, bigint]) */
/* 086 */ /* input[0, bigint] */
/* 087 */ long agg_value6 = agg_aggBuffer.getLong(0);
/* 088 */
/* 089 */ long agg_value5 = -1L;
// Final-mode count merge: running total += this row's partial count.
/* 090 */ agg_value5 = agg_value6 + agg_value1;
/* 091 */ // update aggregate buffer
/* 092 */ agg_aggBuffer.setLong(0, agg_value5);
/* 093 */ }
/* 094 */
/* 095 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 096 */ }
/* 097 */
// Runs the aggregation once, then emits (key, final count) rows via append();
// shouldStop() suspends between rows, and cleanup frees the map unless spilled.
/* 098 */ protected void processNext() throws java.io.IOException {
/* 099 */ /*** PRODUCE: TungstenAggregate(key=[k#207L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#207L,count#210L]) */
/* 100 */
/* 101 */ if (!agg_initAgg) {
/* 102 */ agg_initAgg = true;
/* 103 */ agg_doAggregateWithKeys();
/* 104 */ }
/* 105 */
/* 106 */ // output the result
/* 107 */ while (agg_mapIter.next()) {
/* 108 */ agg_metricValue.add(1);
/* 109 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 110 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 111 */
/* 112 */ /* input[0, bigint] */
/* 113 */ long agg_value8 = agg_aggKey.getLong(0);
/* 114 */ /* input[0, bigint] */
/* 115 */ long agg_value9 = agg_aggBuffer1.getLong(0);
/* 116 */
/* 117 */ agg_rowWriter1.write(0, agg_value8);
/* 118 */
/* 119 */ agg_rowWriter1.write(1, agg_value9);
/* 120 */ append(agg_result1);
/* 121 */
/* 122 */ if (shouldStop()) return;
/* 123 */ }
/* 124 */
/* 125 */ agg_mapIter.close();
/* 126 */ if (agg_sorter == null) {
/* 127 */ agg_hashMap.free();
/* 128 */ }
/* 129 */ }
/* 130 */ }
// Codegen entry point compiled and invoked by Spark: wraps the pipeline's plan
// nodes and metrics (the `references` array) in a fresh GeneratedIterator.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#207L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#207L,count#215L])
/* 007 */ +- Project [(id#204L & 2) AS k#207L]
/* 008 */ +- Range 0, 1, 1, 20971520, [id#204L]
/* 009 */ */
// Whole-stage-codegen pipeline: Range -> Project [(id & 2) AS k] ->
// PARTIAL-mode TungstenAggregate counting rows per key k#207L.
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 014 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 015 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 016 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 017 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 018 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 019 */ private boolean range_initRange;
/* 020 */ private long range_partitionEnd;
/* 021 */ private long range_number;
/* 022 */ private boolean range_overflow;
/* 023 */ private scala.collection.Iterator range_input;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowJoiner agg_unsafeRowJoiner;
/* 028 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 029 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 030 */
/* 031 */ public GeneratedIterator(Object[] references) {
/* 032 */ this.references = references;
/* 033 */ }
/* 034 */
// references[] contract: [0] = TungstenAggregate plan node, [1] = Range
// numOutputRows metric, [2] = aggregate numOutputRows metric. inputs[0]
// supplies the single partition-index row read by the Range operator.
/* 035 */ public void init(scala.collection.Iterator inputs[]) {
/* 036 */ agg_initAgg = false;
/* 037 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 038 */ agg_hashMap = agg_plan.createHashMap();
/* 039 */
/* 040 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 041 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 042 */ range_initRange = false;
/* 043 */ range_partitionEnd = 0L;
/* 044 */ range_number = 0L;
/* 045 */ range_overflow = false;
/* 046 */ range_input = inputs[0];
/* 047 */ agg_result = new UnsafeRow(1);
/* 048 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 049 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 050 */ agg_unsafeRowJoiner = agg_plan.createUnsafeJoiner();
/* 051 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[2];
/* 052 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 053 */ }
/* 054 */
// Fused Range -> Project -> partial hash aggregation: generates ids, groups by
// (id & 2), and counts rows per key in the unsafe hash map, spilling to an
// external sorter and retrying when a buffer allocation fails.
/* 055 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 056 */ /*** PRODUCE: Project [(id#204L & 2) AS k#207L] */
/* 057 */
/* 058 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#204L] */
/* 059 */
/* 060 */ // initialize Range
/* 061 */ if (!range_initRange) {
/* 062 */ range_initRange = true;
/* 063 */ if (range_input.hasNext()) {
/* 064 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 065 */ } else {
/* 066 */ return;
/* 067 */ }
/* 068 */ }
/* 069 */
/* 070 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 071 */ long range_value = range_number;
/* 072 */ range_number += 1L;
// (range_number < range_value) ^ (1L < 0): flags 64-bit wrap of the increment
// (the xor'd term is false for the positive step literal 1L).
/* 073 */ if (range_number < range_value ^ 1L < 0) {
/* 074 */ range_overflow = true;
/* 075 */ }
/* 076 */
/* 077 */ /*** CONSUME: Project [(id#204L & 2) AS k#207L] */
/* 078 */
/* 079 */ /*** CONSUME: TungstenAggregate(key=[k#207L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#207L,count#215L]) */
/* 080 */ /* (input[0, bigint] & 2) */
/* 081 */ long project_value = -1L;
/* 082 */ project_value = range_value & 2L;
/* 083 */
/* 084 */ // generate grouping key
/* 085 */ agg_rowWriter.write(0, project_value);
/* 086 */ /* hash(input[0, bigint], 42) */
/* 087 */ int agg_value1 = 42;
/* 088 */
/* 089 */ agg_value1 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(project_value, agg_value1);
/* 090 */ UnsafeRow agg_aggBuffer = null;
/* 091 */ if (true) {
/* 092 */ // try to get the buffer from hash map
/* 093 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 094 */ }
/* 095 */ if (agg_aggBuffer == null) {
// Allocation failed: spill current map contents to free memory, then retry.
/* 096 */ if (agg_sorter == null) {
/* 097 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 098 */ } else {
/* 099 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 100 */ }
/* 101 */
/* 102 */ // the hash map had be spilled, it should have enough memory now,
/* 103 */ // try to allocate buffer again.
/* 104 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 105 */ if (agg_aggBuffer == null) {
/* 106 */ // failed to allocate the first page
/* 107 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 108 */ }
/* 109 */ }
/* 110 */
/* 111 */ // evaluate aggregate function
/* 112 */ /* (input[0, bigint] + 1) */
/* 113 */ /* input[0, bigint] */
/* 114 */ long agg_value4 = agg_aggBuffer.getLong(0);
/* 115 */
/* 116 */ long agg_value3 = -1L;
// Partial count(1): increment this key's running count stored in the buffer.
/* 117 */ agg_value3 = agg_value4 + 1L;
/* 118 */ // update aggregate buffer
/* 119 */ agg_aggBuffer.setLong(0, agg_value3);
/* 120 */
/* 121 */ }
/* 122 */
/* 123 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 124 */ }
/* 125 */
// Computes this task's [start, end) slice of Range(0, 20971520, step 1,
// 1 slice) with BigInteger to avoid overflow, clamps to long range, and
// credits the slice size to the numOutputRows metric.
/* 126 */ private void initRange(int idx) {
/* 127 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 128 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 129 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 130 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 131 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 132 */
/* 133 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 134 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 135 */ range_number = Long.MAX_VALUE;
/* 136 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 137 */ range_number = Long.MIN_VALUE;
/* 138 */ } else {
/* 139 */ range_number = st.longValue();
/* 140 */ }
/* 141 */
/* 142 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 143 */ .multiply(step).add(start);
/* 144 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 145 */ range_partitionEnd = Long.MAX_VALUE;
/* 146 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 147 */ range_partitionEnd = Long.MIN_VALUE;
/* 148 */ } else {
/* 149 */ range_partitionEnd = end.longValue();
/* 150 */ }
/* 151 */
/* 152 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 153 */ }
/* 154 */
// Runs the aggregation once, then joins each (key, buffer) pair into a result
// row and emits it via append(); shouldStop() suspends between rows, and the
// hash map is freed only when nothing was spilled to the sorter.
/* 155 */ protected void processNext() throws java.io.IOException {
/* 156 */ /*** PRODUCE: TungstenAggregate(key=[k#207L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#207L,count#215L]) */
/* 157 */
/* 158 */ if (!agg_initAgg) {
/* 159 */ agg_initAgg = true;
/* 160 */ agg_doAggregateWithKeys();
/* 161 */ }
/* 162 */
/* 163 */ // output the result
/* 164 */ while (agg_mapIter.next()) {
/* 165 */ agg_metricValue.add(1);
/* 166 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 167 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 168 */
/* 169 */ UnsafeRow agg_resultRow = agg_unsafeRowJoiner.join(agg_aggKey, agg_aggBuffer1);
/* 170 */ append(agg_resultRow);
/* 171 */
/* 172 */ if (shouldStop()) return;
/* 173 */ }
/* 174 */
/* 175 */ agg_mapIter.close();
/* 176 */ if (agg_sorter == null) {
/* 177 */ agg_hashMap.free();
/* 178 */ }
/* 179 */ }
/* 180 */ }
// Codegen entry point compiled and invoked by Spark: wraps the pipeline's plan
// nodes and metrics (the `references` array) in a fresh GeneratedIterator.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#219L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#219L,count#222L])
/* 007 */ +- INPUT
/* 008 */ */
// Whole-stage-codegen pipeline: FINAL-mode TungstenAggregate reading shuffled
// (k#219L, partial count) rows from INPUT and merging counts per key.
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 013 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 014 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 015 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 016 */ private scala.collection.Iterator inputadapter_input;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */ private UnsafeRow agg_result1;
/* 021 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder1;
/* 022 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter1;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 024 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 025 */
/* 026 */ public GeneratedIterator(Object[] references) {
/* 027 */ this.references = references;
/* 028 */ }
/* 029 */
// references[] contract: [0] = TungstenAggregate plan node, [1] = numOutputRows
// metric. agg_result is the reused 1-field key row; agg_result1 the 2-field output.
/* 030 */ public void init(scala.collection.Iterator inputs[]) {
/* 031 */ agg_initAgg = false;
/* 032 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 033 */ agg_hashMap = agg_plan.createHashMap();
/* 034 */
/* 035 */ inputadapter_input = inputs[0];
/* 036 */ agg_result = new UnsafeRow(1);
/* 037 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 038 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 039 */ agg_result1 = new UnsafeRow(2);
/* 040 */ this.agg_holder1 = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result1, 0);
/* 041 */ this.agg_rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder1, 2);
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ }
/* 045 */
// Drains the upstream iterator, summing each row's partial count (column 1)
// into the hash-map buffer for its key (column 0). Spills to an external
// sorter and retries when a buffer allocation fails.
/* 046 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 047 */ /*** PRODUCE: INPUT */
/* 048 */
/* 049 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 050 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 051 */ /*** CONSUME: TungstenAggregate(key=[k#219L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#219L,count#222L]) */
/* 052 */ /* input[0, bigint] */
/* 053 */ long agg_value = inputadapter_row.getLong(0);
/* 054 */ /* input[1, bigint] */
/* 055 */ long agg_value1 = inputadapter_row.getLong(1);
/* 056 */
/* 057 */ // generate grouping key
/* 058 */ agg_rowWriter.write(0, agg_value);
/* 059 */ /* hash(input[0, bigint], 42) */
/* 060 */ int agg_value3 = 42;
/* 061 */
/* 062 */ agg_value3 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(agg_value, agg_value3);
/* 063 */ UnsafeRow agg_aggBuffer = null;
/* 064 */ if (true) {
/* 065 */ // try to get the buffer from hash map
/* 066 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 067 */ }
/* 068 */ if (agg_aggBuffer == null) {
// Allocation failed: spill current map contents to free memory, then retry.
/* 069 */ if (agg_sorter == null) {
/* 070 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 071 */ } else {
/* 072 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 073 */ }
/* 074 */
/* 075 */ // the hash map had be spilled, it should have enough memory now,
/* 076 */ // try to allocate buffer again.
/* 077 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 078 */ if (agg_aggBuffer == null) {
/* 079 */ // failed to allocate the first page
/* 080 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 081 */ }
/* 082 */ }
/* 083 */
/* 084 */ // evaluate aggregate function
/* 085 */ /* (input[0, bigint] + input[2, bigint]) */
/* 086 */ /* input[0, bigint] */
/* 087 */ long agg_value6 = agg_aggBuffer.getLong(0);
/* 088 */
/* 089 */ long agg_value5 = -1L;
// Final-mode count merge: running total += this row's partial count.
/* 090 */ agg_value5 = agg_value6 + agg_value1;
/* 091 */ // update aggregate buffer
/* 092 */ agg_aggBuffer.setLong(0, agg_value5);
/* 093 */ }
/* 094 */
/* 095 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 096 */ }
/* 097 */
// Runs the aggregation once, then emits (key, final count) rows via append();
// shouldStop() suspends between rows, and cleanup frees the map unless spilled.
/* 098 */ protected void processNext() throws java.io.IOException {
/* 099 */ /*** PRODUCE: TungstenAggregate(key=[k#219L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#219L,count#222L]) */
/* 100 */
/* 101 */ if (!agg_initAgg) {
/* 102 */ agg_initAgg = true;
/* 103 */ agg_doAggregateWithKeys();
/* 104 */ }
/* 105 */
/* 106 */ // output the result
/* 107 */ while (agg_mapIter.next()) {
/* 108 */ agg_metricValue.add(1);
/* 109 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 110 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 111 */
/* 112 */ /* input[0, bigint] */
/* 113 */ long agg_value8 = agg_aggKey.getLong(0);
/* 114 */ /* input[0, bigint] */
/* 115 */ long agg_value9 = agg_aggBuffer1.getLong(0);
/* 116 */
/* 117 */ agg_rowWriter1.write(0, agg_value8);
/* 118 */
/* 119 */ agg_rowWriter1.write(1, agg_value9);
/* 120 */ append(agg_result1);
/* 121 */
/* 122 */ if (shouldStop()) return;
/* 123 */ }
/* 124 */
/* 125 */ agg_mapIter.close();
/* 126 */ if (agg_sorter == null) {
/* 127 */ agg_hashMap.free();
/* 128 */ }
/* 129 */ }
/* 130 */ }
// Codegen entry point compiled and invoked by Spark: wraps the pipeline's plan
// nodes and metrics (the `references` array) in a fresh GeneratedIterator.
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#219L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#219L,count#227L])
/* 007 */ +- Project [(id#216L & 2) AS k#219L]
/* 008 */ +- Range 0, 1, 1, 20971520, [id#216L]
/* 009 */ */
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 014 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 015 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 016 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 017 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 018 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 019 */ private boolean range_initRange;
/* 020 */ private long range_partitionEnd;
/* 021 */ private long range_number;
/* 022 */ private boolean range_overflow;
/* 023 */ private scala.collection.Iterator range_input;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowJoiner agg_unsafeRowJoiner;
/* 028 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 029 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 030 */
/* 031 */ public GeneratedIterator(Object[] references) {
/* 032 */ this.references = references;
/* 033 */ }
/* 034 */
/* 035 */ public void init(scala.collection.Iterator inputs[]) {
/* 036 */ agg_initAgg = false;
/* 037 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 038 */ agg_hashMap = agg_plan.createHashMap();
/* 039 */
/* 040 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 041 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 042 */ range_initRange = false;
/* 043 */ range_partitionEnd = 0L;
/* 044 */ range_number = 0L;
/* 045 */ range_overflow = false;
/* 046 */ range_input = inputs[0];
/* 047 */ agg_result = new UnsafeRow(1);
/* 048 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 049 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 050 */ agg_unsafeRowJoiner = agg_plan.createUnsafeJoiner();
/* 051 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[2];
/* 052 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 053 */ }
/* 054 */
/* 055 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 056 */ /*** PRODUCE: Project [(id#216L & 2) AS k#219L] */
/* 057 */
/* 058 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#216L] */
/* 059 */
/* 060 */ // initialize Range
/* 061 */ if (!range_initRange) {
/* 062 */ range_initRange = true;
/* 063 */ if (range_input.hasNext()) {
/* 064 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 065 */ } else {
/* 066 */ return;
/* 067 */ }
/* 068 */ }
/* 069 */
/* 070 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 071 */ long range_value = range_number;
/* 072 */ range_number += 1L;
/* 073 */ if (range_number < range_value ^ 1L < 0) {
/* 074 */ range_overflow = true;
/* 075 */ }
/* 076 */
/* 077 */ /*** CONSUME: Project [(id#216L & 2) AS k#219L] */
/* 078 */
/* 079 */ /*** CONSUME: TungstenAggregate(key=[k#219L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#219L,count#227L]) */
/* 080 */ /* (input[0, bigint] & 2) */
/* 081 */ long project_value = -1L;
/* 082 */ project_value = range_value & 2L;
/* 083 */
/* 084 */ // generate grouping key
/* 085 */ agg_rowWriter.write(0, project_value);
/* 086 */ /* hash(input[0, bigint], 42) */
/* 087 */ int agg_value1 = 42;
/* 088 */
/* 089 */ agg_value1 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(project_value, agg_value1);
/* 090 */ UnsafeRow agg_aggBuffer = null;
/* 091 */ if (true) {
/* 092 */ // try to get the buffer from hash map
/* 093 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 094 */ }
/* 095 */ if (agg_aggBuffer == null) {
/* 096 */ if (agg_sorter == null) {
/* 097 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 098 */ } else {
/* 099 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 100 */ }
/* 101 */
/* 102 */ // the hash map had be spilled, it should have enough memory now,
/* 103 */ // try to allocate buffer again.
/* 104 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 105 */ if (agg_aggBuffer == null) {
/* 106 */ // failed to allocate the first page
/* 107 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 108 */ }
/* 109 */ }
/* 110 */
/* 111 */ // evaluate aggregate function
/* 112 */ /* (input[0, bigint] + 1) */
/* 113 */ /* input[0, bigint] */
/* 114 */ long agg_value4 = agg_aggBuffer.getLong(0);
/* 115 */
/* 116 */ long agg_value3 = -1L;
/* 117 */ agg_value3 = agg_value4 + 1L;
/* 118 */ // update aggregate buffer
/* 119 */ agg_aggBuffer.setLong(0, agg_value3);
/* 120 */
/* 121 */ }
/* 122 */
/* 123 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 124 */ }
/* 125 */
// Derives this partition's half-open row range [range_number, range_partitionEnd)
// for Range 0, 1, 1, 20971520. All arithmetic is done in BigInteger and then
// clamped into the long domain before being stored in the iterator's fields,
// so overflow during start/end computation cannot corrupt the loop bounds.
/* 126 */ private void initRange(int idx) {
/* 127 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 128 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 129 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 130 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 131 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 132 */
// st = (idx * numElement / numSlice) * step + start, clamped to [Long.MIN_VALUE, Long.MAX_VALUE].
/* 133 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 134 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 135 */ range_number = Long.MAX_VALUE;
/* 136 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 137 */ range_number = Long.MIN_VALUE;
/* 138 */ } else {
/* 139 */ range_number = st.longValue();
/* 140 */ }
/* 141 */
// end uses (idx + 1) so consecutive partition indices tile the range without gaps.
/* 142 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 143 */ .multiply(step).add(start);
/* 144 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 145 */ range_partitionEnd = Long.MAX_VALUE;
/* 146 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 147 */ range_partitionEnd = Long.MIN_VALUE;
/* 148 */ } else {
/* 149 */ range_partitionEnd = end.longValue();
/* 150 */ }
/* 151 */
// Credits the whole partition's row count to the numOutputRows metric up front
// (divided by the step of 1L, i.e. the number of steps in the range).
/* 152 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 153 */ }
/* 154 */
// Runs the hash aggregation lazily on the first call, then streams out one
// joined (grouping key, aggregation buffer) row per distinct key from the
// map iterator, honoring shouldStop() between rows.
/* 155 */ protected void processNext() throws java.io.IOException {
/* 156 */ /*** PRODUCE: TungstenAggregate(key=[k#219L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#219L,count#227L]) */
/* 157 */
/* 158 */ if (!agg_initAgg) {
/* 159 */ agg_initAgg = true;
/* 160 */ agg_doAggregateWithKeys();
/* 161 */ }
/* 162 */
/* 163 */ // output the result
/* 164 */ while (agg_mapIter.next()) {
/* 165 */ agg_metricValue.add(1);
/* 166 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 167 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 168 */
// Key and buffer are concatenated into a single output row for the consumer.
/* 169 */ UnsafeRow agg_resultRow = agg_unsafeRowJoiner.join(agg_aggKey, agg_aggBuffer1);
/* 170 */ append(agg_resultRow);
/* 171 */
/* 172 */ if (shouldStop()) return;
/* 173 */ }
/* 174 */
/* 175 */ agg_mapIter.close();
// The map is freed here only when no spill happened (agg_sorter == null);
// NOTE(review): presumably the external sorter took ownership of the spilled
// pages otherwise — confirm against UnsafeKVExternalSorter.
/* 176 */ if (agg_sorter == null) {
/* 177 */ agg_hashMap.free();
/* 178 */ }
/* 179 */ }
/* 180 */ }
// Entry point of this generated compilation unit: constructs the generated
// iterator bound to the captured references array (plan nodes and metrics).
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#231L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#231L,count#234L])
/* 007 */ +- INPUT
/* 008 */ */
// Whole-stage generated pipeline: FINAL-mode hash aggregation over shuffled
// input rows of (k#231L, partial count). Merges partial counts per key and
// emits one (k, count) row per distinct key.
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 013 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 014 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 015 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 016 */ private scala.collection.Iterator inputadapter_input;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */ private UnsafeRow agg_result1;
/* 021 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder1;
/* 022 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter1;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 024 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 025 */
/* 026 */ public GeneratedIterator(Object[] references) {
/* 027 */ this.references = references;
/* 028 */ }
/* 029 */
// Wires runtime state from references[]: references[0] is the TungstenAggregate
// plan node, references[1] its numOutputRows metric. agg_result (1 field) is the
// scratch grouping-key row; agg_result1 (2 fields) is the scratch output row.
/* 030 */ public void init(scala.collection.Iterator inputs[]) {
/* 031 */ agg_initAgg = false;
/* 032 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 033 */ agg_hashMap = agg_plan.createHashMap();
/* 034 */
/* 035 */ inputadapter_input = inputs[0];
/* 036 */ agg_result = new UnsafeRow(1);
/* 037 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 038 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 039 */ agg_result1 = new UnsafeRow(2);
/* 040 */ this.agg_holder1 = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result1, 0);
/* 041 */ this.agg_rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder1, 2);
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ }
/* 045 */
// Consumes every input row, merging each partial count (column 1) into the
// per-key aggregation buffer found via the hash of the key (column 0).
/* 046 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 047 */ /*** PRODUCE: INPUT */
/* 048 */
/* 049 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 050 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 051 */ /*** CONSUME: TungstenAggregate(key=[k#231L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#231L,count#234L]) */
/* 052 */ /* input[0, bigint] */
/* 053 */ long agg_value = inputadapter_row.getLong(0);
/* 054 */ /* input[1, bigint] */
/* 055 */ long agg_value1 = inputadapter_row.getLong(1);
/* 056 */
// agg_rowWriter overwrites slot 0 of the reused agg_result scratch row, which
// then serves as the probe key for the aggregation map.
/* 057 */ // generate grouping key
/* 058 */ agg_rowWriter.write(0, agg_value);
/* 059 */ /* hash(input[0, bigint], 42) */
/* 060 */ int agg_value3 = 42;
/* 061 */
/* 062 */ agg_value3 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(agg_value, agg_value3);
/* 063 */ UnsafeRow agg_aggBuffer = null;
/* 064 */ if (true) {
/* 065 */ // try to get the buffer from hash map
/* 066 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 067 */ }
// null buffer means the map is out of memory: spill it into the external
// sorter (creating or merging), then retry the allocation once.
/* 068 */ if (agg_aggBuffer == null) {
/* 069 */ if (agg_sorter == null) {
/* 070 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 071 */ } else {
/* 072 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 073 */ }
/* 074 */
/* 075 */ // the hash map had be spilled, it should have enough memory now,
/* 076 */ // try to allocate buffer again.
/* 077 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 078 */ if (agg_aggBuffer == null) {
/* 079 */ // failed to allocate the first page
// NOTE(review): message wording ("No enough") comes from the codegen template; left as-is.
/* 080 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 081 */ }
/* 082 */ }
/* 083 */
/* 084 */ // evaluate aggregate function
/* 085 */ /* (input[0, bigint] + input[2, bigint]) */
/* 086 */ /* input[0, bigint] */
/* 087 */ long agg_value6 = agg_aggBuffer.getLong(0);
/* 088 */
/* 089 */ long agg_value5 = -1L;
/* 090 */ agg_value5 = agg_value6 + agg_value1;
/* 091 */ // update aggregate buffer
/* 092 */ agg_aggBuffer.setLong(0, agg_value5);
/* 093 */ }
/* 094 */
/* 095 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 096 */ }
/* 097 */
// Lazily aggregates on first call, then emits one (key, count) row per
// distinct key; the key and buffer longs are copied into agg_result1.
/* 098 */ protected void processNext() throws java.io.IOException {
/* 099 */ /*** PRODUCE: TungstenAggregate(key=[k#231L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#231L,count#234L]) */
/* 100 */
/* 101 */ if (!agg_initAgg) {
/* 102 */ agg_initAgg = true;
/* 103 */ agg_doAggregateWithKeys();
/* 104 */ }
/* 105 */
/* 106 */ // output the result
/* 107 */ while (agg_mapIter.next()) {
/* 108 */ agg_metricValue.add(1);
/* 109 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 110 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 111 */
/* 112 */ /* input[0, bigint] */
/* 113 */ long agg_value8 = agg_aggKey.getLong(0);
/* 114 */ /* input[0, bigint] */
/* 115 */ long agg_value9 = agg_aggBuffer1.getLong(0);
/* 116 */
/* 117 */ agg_rowWriter1.write(0, agg_value8);
/* 118 */
/* 119 */ agg_rowWriter1.write(1, agg_value9);
/* 120 */ append(agg_result1);
/* 121 */
/* 122 */ if (shouldStop()) return;
/* 123 */ }
/* 124 */
/* 125 */ agg_mapIter.close();
// Free the map only when no spill happened; otherwise the sorter path cleans up.
/* 126 */ if (agg_sorter == null) {
/* 127 */ agg_hashMap.free();
/* 128 */ }
/* 129 */ }
/* 130 */ }
// Entry point of this generated compilation unit: constructs the generated
// iterator bound to the captured references array (plan nodes and metrics).
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#231L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#231L,count#239L])
/* 007 */ +- Project [(id#228L & 2) AS k#231L]
/* 008 */ +- Range 0, 1, 1, 20971520, [id#228L]
/* 009 */ */
// Whole-stage generated pipeline: Range -> Project (k = id#228L & 2) ->
// PARTIAL-mode hash aggregation counting rows per key k#231L.
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 014 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 015 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 016 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 017 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 018 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 019 */ private boolean range_initRange;
/* 020 */ private long range_partitionEnd;
/* 021 */ private long range_number;
/* 022 */ private boolean range_overflow;
/* 023 */ private scala.collection.Iterator range_input;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowJoiner agg_unsafeRowJoiner;
/* 028 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 029 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 030 */
/* 031 */ public GeneratedIterator(Object[] references) {
/* 032 */ this.references = references;
/* 033 */ }
/* 034 */
// Wires runtime state from references[]: references[0] is the TungstenAggregate
// plan node, references[1] the Range numOutputRows metric, references[2] the
// aggregate numOutputRows metric. agg_result is the reused grouping-key row.
/* 035 */ public void init(scala.collection.Iterator inputs[]) {
/* 036 */ agg_initAgg = false;
/* 037 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 038 */ agg_hashMap = agg_plan.createHashMap();
/* 039 */
/* 040 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 041 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 042 */ range_initRange = false;
/* 043 */ range_partitionEnd = 0L;
/* 044 */ range_number = 0L;
/* 045 */ range_overflow = false;
/* 046 */ range_input = inputs[0];
/* 047 */ agg_result = new UnsafeRow(1);
/* 048 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 049 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 050 */ agg_unsafeRowJoiner = agg_plan.createUnsafeJoiner();
/* 051 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[2];
/* 052 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 053 */ }
/* 054 */
// Generates the id values in [range_number, range_partitionEnd), projects
// k = id & 2, and counts rows per key into the hash map (with spill-to-sorter
// fallback when the map cannot allocate a buffer).
/* 055 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 056 */ /*** PRODUCE: Project [(id#228L & 2) AS k#231L] */
/* 057 */
/* 058 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#228L] */
/* 059 */
/* 060 */ // initialize Range
/* 061 */ if (!range_initRange) {
/* 062 */ range_initRange = true;
/* 063 */ if (range_input.hasNext()) {
/* 064 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 065 */ } else {
/* 066 */ return;
/* 067 */ }
/* 068 */ }
/* 069 */
/* 070 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 071 */ long range_value = range_number;
/* 072 */ range_number += 1L;
// (range_number < range_value) ^ (1L < 0): with a positive step the second
// operand is false, so this flags overflow when the counter wrapped below
// its previous value after the increment.
/* 073 */ if (range_number < range_value ^ 1L < 0) {
/* 074 */ range_overflow = true;
/* 075 */ }
/* 076 */
/* 077 */ /*** CONSUME: Project [(id#228L & 2) AS k#231L] */
/* 078 */
/* 079 */ /*** CONSUME: TungstenAggregate(key=[k#231L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#231L,count#239L]) */
/* 080 */ /* (input[0, bigint] & 2) */
// id & 2 can only yield 0 or 2, so at most two distinct keys reach the map.
/* 081 */ long project_value = -1L;
/* 082 */ project_value = range_value & 2L;
/* 083 */
/* 084 */ // generate grouping key
/* 085 */ agg_rowWriter.write(0, project_value);
/* 086 */ /* hash(input[0, bigint], 42) */
/* 087 */ int agg_value1 = 42;
/* 088 */
/* 089 */ agg_value1 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(project_value, agg_value1);
/* 090 */ UnsafeRow agg_aggBuffer = null;
/* 091 */ if (true) {
/* 092 */ // try to get the buffer from hash map
/* 093 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 094 */ }
// null buffer means the map is out of memory: spill into the external sorter
// (creating or merging), then retry the allocation once.
/* 095 */ if (agg_aggBuffer == null) {
/* 096 */ if (agg_sorter == null) {
/* 097 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 098 */ } else {
/* 099 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 100 */ }
/* 101 */
/* 102 */ // the hash map had be spilled, it should have enough memory now,
/* 103 */ // try to allocate buffer again.
/* 104 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 105 */ if (agg_aggBuffer == null) {
/* 106 */ // failed to allocate the first page
// NOTE(review): message wording ("No enough") comes from the codegen template; left as-is.
/* 107 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 108 */ }
/* 109 */ }
/* 110 */
/* 111 */ // evaluate aggregate function
/* 112 */ /* (input[0, bigint] + 1) */
/* 113 */ /* input[0, bigint] */
/* 114 */ long agg_value4 = agg_aggBuffer.getLong(0);
/* 115 */
/* 116 */ long agg_value3 = -1L;
/* 117 */ agg_value3 = agg_value4 + 1L;
/* 118 */ // update aggregate buffer
/* 119 */ agg_aggBuffer.setLong(0, agg_value3);
/* 120 */
/* 121 */ }
/* 122 */
/* 123 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 124 */ }
/* 125 */
// Derives this partition's half-open row range in BigInteger and clamps the
// start/end into the long domain before storing them in the fields.
/* 126 */ private void initRange(int idx) {
/* 127 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 128 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 129 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 130 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 131 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 132 */
/* 133 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 134 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 135 */ range_number = Long.MAX_VALUE;
/* 136 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 137 */ range_number = Long.MIN_VALUE;
/* 138 */ } else {
/* 139 */ range_number = st.longValue();
/* 140 */ }
/* 141 */
/* 142 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 143 */ .multiply(step).add(start);
/* 144 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 145 */ range_partitionEnd = Long.MAX_VALUE;
/* 146 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 147 */ range_partitionEnd = Long.MIN_VALUE;
/* 148 */ } else {
/* 149 */ range_partitionEnd = end.longValue();
/* 150 */ }
/* 151 */
// Credits the whole partition's row count to the Range metric up front.
/* 152 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 153 */ }
/* 154 */
// Lazily aggregates on first call, then emits one joined (key, buffer) row
// per distinct key from the map iterator.
/* 155 */ protected void processNext() throws java.io.IOException {
/* 156 */ /*** PRODUCE: TungstenAggregate(key=[k#231L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#231L,count#239L]) */
/* 157 */
/* 158 */ if (!agg_initAgg) {
/* 159 */ agg_initAgg = true;
/* 160 */ agg_doAggregateWithKeys();
/* 161 */ }
/* 162 */
/* 163 */ // output the result
/* 164 */ while (agg_mapIter.next()) {
/* 165 */ agg_metricValue.add(1);
/* 166 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 167 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 168 */
/* 169 */ UnsafeRow agg_resultRow = agg_unsafeRowJoiner.join(agg_aggKey, agg_aggBuffer1);
/* 170 */ append(agg_resultRow);
/* 171 */
/* 172 */ if (shouldStop()) return;
/* 173 */ }
/* 174 */
/* 175 */ agg_mapIter.close();
// Free the map only when no spill happened; otherwise the sorter path cleans up.
/* 176 */ if (agg_sorter == null) {
/* 177 */ agg_hashMap.free();
/* 178 */ }
/* 179 */ }
/* 180 */ }
// Entry point of this generated compilation unit: constructs the generated
// iterator bound to the captured references array (plan nodes and metrics).
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#243L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#243L,count#246L])
/* 007 */ +- INPUT
/* 008 */ */
// Whole-stage generated pipeline: FINAL-mode hash aggregation over shuffled
// input rows of (k#243L, partial count). Merges partial counts per key and
// emits one (k, count) row per distinct key.
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 013 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 014 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 015 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 016 */ private scala.collection.Iterator inputadapter_input;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */ private UnsafeRow agg_result1;
/* 021 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder1;
/* 022 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter1;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 024 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 025 */
/* 026 */ public GeneratedIterator(Object[] references) {
/* 027 */ this.references = references;
/* 028 */ }
/* 029 */
// Wires runtime state from references[]: references[0] is the TungstenAggregate
// plan node, references[1] its numOutputRows metric. agg_result (1 field) is the
// scratch grouping-key row; agg_result1 (2 fields) is the scratch output row.
/* 030 */ public void init(scala.collection.Iterator inputs[]) {
/* 031 */ agg_initAgg = false;
/* 032 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 033 */ agg_hashMap = agg_plan.createHashMap();
/* 034 */
/* 035 */ inputadapter_input = inputs[0];
/* 036 */ agg_result = new UnsafeRow(1);
/* 037 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 038 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 039 */ agg_result1 = new UnsafeRow(2);
/* 040 */ this.agg_holder1 = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result1, 0);
/* 041 */ this.agg_rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder1, 2);
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ }
/* 045 */
// Consumes every input row, merging each partial count (column 1) into the
// per-key aggregation buffer found via the hash of the key (column 0).
/* 046 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 047 */ /*** PRODUCE: INPUT */
/* 048 */
/* 049 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 050 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 051 */ /*** CONSUME: TungstenAggregate(key=[k#243L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#243L,count#246L]) */
/* 052 */ /* input[0, bigint] */
/* 053 */ long agg_value = inputadapter_row.getLong(0);
/* 054 */ /* input[1, bigint] */
/* 055 */ long agg_value1 = inputadapter_row.getLong(1);
/* 056 */
// agg_rowWriter overwrites slot 0 of the reused agg_result scratch row, which
// then serves as the probe key for the aggregation map.
/* 057 */ // generate grouping key
/* 058 */ agg_rowWriter.write(0, agg_value);
/* 059 */ /* hash(input[0, bigint], 42) */
/* 060 */ int agg_value3 = 42;
/* 061 */
/* 062 */ agg_value3 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(agg_value, agg_value3);
/* 063 */ UnsafeRow agg_aggBuffer = null;
/* 064 */ if (true) {
/* 065 */ // try to get the buffer from hash map
/* 066 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 067 */ }
// null buffer means the map is out of memory: spill it into the external
// sorter (creating or merging), then retry the allocation once.
/* 068 */ if (agg_aggBuffer == null) {
/* 069 */ if (agg_sorter == null) {
/* 070 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 071 */ } else {
/* 072 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 073 */ }
/* 074 */
/* 075 */ // the hash map had be spilled, it should have enough memory now,
/* 076 */ // try to allocate buffer again.
/* 077 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 078 */ if (agg_aggBuffer == null) {
/* 079 */ // failed to allocate the first page
// NOTE(review): message wording ("No enough") comes from the codegen template; left as-is.
/* 080 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 081 */ }
/* 082 */ }
/* 083 */
/* 084 */ // evaluate aggregate function
/* 085 */ /* (input[0, bigint] + input[2, bigint]) */
/* 086 */ /* input[0, bigint] */
/* 087 */ long agg_value6 = agg_aggBuffer.getLong(0);
/* 088 */
/* 089 */ long agg_value5 = -1L;
/* 090 */ agg_value5 = agg_value6 + agg_value1;
/* 091 */ // update aggregate buffer
/* 092 */ agg_aggBuffer.setLong(0, agg_value5);
/* 093 */ }
/* 094 */
/* 095 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 096 */ }
/* 097 */
// Lazily aggregates on first call, then emits one (key, count) row per
// distinct key; the key and buffer longs are copied into agg_result1.
/* 098 */ protected void processNext() throws java.io.IOException {
/* 099 */ /*** PRODUCE: TungstenAggregate(key=[k#243L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#243L,count#246L]) */
/* 100 */
/* 101 */ if (!agg_initAgg) {
/* 102 */ agg_initAgg = true;
/* 103 */ agg_doAggregateWithKeys();
/* 104 */ }
/* 105 */
/* 106 */ // output the result
/* 107 */ while (agg_mapIter.next()) {
/* 108 */ agg_metricValue.add(1);
/* 109 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 110 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 111 */
/* 112 */ /* input[0, bigint] */
/* 113 */ long agg_value8 = agg_aggKey.getLong(0);
/* 114 */ /* input[0, bigint] */
/* 115 */ long agg_value9 = agg_aggBuffer1.getLong(0);
/* 116 */
/* 117 */ agg_rowWriter1.write(0, agg_value8);
/* 118 */
/* 119 */ agg_rowWriter1.write(1, agg_value9);
/* 120 */ append(agg_result1);
/* 121 */
/* 122 */ if (shouldStop()) return;
/* 123 */ }
/* 124 */
/* 125 */ agg_mapIter.close();
// Free the map only when no spill happened; otherwise the sorter path cleans up.
/* 126 */ if (agg_sorter == null) {
/* 127 */ agg_hashMap.free();
/* 128 */ }
/* 129 */ }
/* 130 */ }
// Entry point of this generated compilation unit: constructs the generated
// iterator bound to the captured references array (plan nodes and metrics).
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#243L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#243L,count#251L])
/* 007 */ +- Project [(id#240L & 2) AS k#243L]
/* 008 */ +- Range 0, 1, 1, 20971520, [id#240L]
/* 009 */ */
// Whole-stage generated pipeline: Range -> Project (k = id#240L & 2) ->
// PARTIAL-mode hash aggregation counting rows per key k#243L.
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 014 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 015 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 016 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 017 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 018 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 019 */ private boolean range_initRange;
/* 020 */ private long range_partitionEnd;
/* 021 */ private long range_number;
/* 022 */ private boolean range_overflow;
/* 023 */ private scala.collection.Iterator range_input;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowJoiner agg_unsafeRowJoiner;
/* 028 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 029 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 030 */
/* 031 */ public GeneratedIterator(Object[] references) {
/* 032 */ this.references = references;
/* 033 */ }
/* 034 */
// Wires runtime state from references[]: references[0] is the TungstenAggregate
// plan node, references[1] the Range numOutputRows metric, references[2] the
// aggregate numOutputRows metric. agg_result is the reused grouping-key row.
/* 035 */ public void init(scala.collection.Iterator inputs[]) {
/* 036 */ agg_initAgg = false;
/* 037 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 038 */ agg_hashMap = agg_plan.createHashMap();
/* 039 */
/* 040 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 041 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 042 */ range_initRange = false;
/* 043 */ range_partitionEnd = 0L;
/* 044 */ range_number = 0L;
/* 045 */ range_overflow = false;
/* 046 */ range_input = inputs[0];
/* 047 */ agg_result = new UnsafeRow(1);
/* 048 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 049 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 050 */ agg_unsafeRowJoiner = agg_plan.createUnsafeJoiner();
/* 051 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[2];
/* 052 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 053 */ }
/* 054 */
// Generates the id values in [range_number, range_partitionEnd), projects
// k = id & 2, and counts rows per key into the hash map (with spill-to-sorter
// fallback when the map cannot allocate a buffer).
/* 055 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 056 */ /*** PRODUCE: Project [(id#240L & 2) AS k#243L] */
/* 057 */
/* 058 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#240L] */
/* 059 */
/* 060 */ // initialize Range
/* 061 */ if (!range_initRange) {
/* 062 */ range_initRange = true;
/* 063 */ if (range_input.hasNext()) {
/* 064 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 065 */ } else {
/* 066 */ return;
/* 067 */ }
/* 068 */ }
/* 069 */
/* 070 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 071 */ long range_value = range_number;
/* 072 */ range_number += 1L;
// (range_number < range_value) ^ (1L < 0): with a positive step the second
// operand is false, so this flags overflow when the counter wrapped below
// its previous value after the increment.
/* 073 */ if (range_number < range_value ^ 1L < 0) {
/* 074 */ range_overflow = true;
/* 075 */ }
/* 076 */
/* 077 */ /*** CONSUME: Project [(id#240L & 2) AS k#243L] */
/* 078 */
/* 079 */ /*** CONSUME: TungstenAggregate(key=[k#243L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#243L,count#251L]) */
/* 080 */ /* (input[0, bigint] & 2) */
// id & 2 can only yield 0 or 2, so at most two distinct keys reach the map.
/* 081 */ long project_value = -1L;
/* 082 */ project_value = range_value & 2L;
/* 083 */
/* 084 */ // generate grouping key
/* 085 */ agg_rowWriter.write(0, project_value);
/* 086 */ /* hash(input[0, bigint], 42) */
/* 087 */ int agg_value1 = 42;
/* 088 */
/* 089 */ agg_value1 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(project_value, agg_value1);
/* 090 */ UnsafeRow agg_aggBuffer = null;
/* 091 */ if (true) {
/* 092 */ // try to get the buffer from hash map
/* 093 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 094 */ }
// null buffer means the map is out of memory: spill into the external sorter
// (creating or merging), then retry the allocation once.
/* 095 */ if (agg_aggBuffer == null) {
/* 096 */ if (agg_sorter == null) {
/* 097 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 098 */ } else {
/* 099 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 100 */ }
/* 101 */
/* 102 */ // the hash map had be spilled, it should have enough memory now,
/* 103 */ // try to allocate buffer again.
/* 104 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 105 */ if (agg_aggBuffer == null) {
/* 106 */ // failed to allocate the first page
// NOTE(review): message wording ("No enough") comes from the codegen template; left as-is.
/* 107 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 108 */ }
/* 109 */ }
/* 110 */
/* 111 */ // evaluate aggregate function
/* 112 */ /* (input[0, bigint] + 1) */
/* 113 */ /* input[0, bigint] */
/* 114 */ long agg_value4 = agg_aggBuffer.getLong(0);
/* 115 */
/* 116 */ long agg_value3 = -1L;
/* 117 */ agg_value3 = agg_value4 + 1L;
/* 118 */ // update aggregate buffer
/* 119 */ agg_aggBuffer.setLong(0, agg_value3);
/* 120 */
/* 121 */ }
/* 122 */
/* 123 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 124 */ }
/* 125 */
// Derives this partition's half-open row range in BigInteger and clamps the
// start/end into the long domain before storing them in the fields.
/* 126 */ private void initRange(int idx) {
/* 127 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 128 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 129 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 130 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 131 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 132 */
/* 133 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 134 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 135 */ range_number = Long.MAX_VALUE;
/* 136 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 137 */ range_number = Long.MIN_VALUE;
/* 138 */ } else {
/* 139 */ range_number = st.longValue();
/* 140 */ }
/* 141 */
/* 142 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 143 */ .multiply(step).add(start);
/* 144 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 145 */ range_partitionEnd = Long.MAX_VALUE;
/* 146 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 147 */ range_partitionEnd = Long.MIN_VALUE;
/* 148 */ } else {
/* 149 */ range_partitionEnd = end.longValue();
/* 150 */ }
/* 151 */
// Credits the whole partition's row count to the Range metric up front.
/* 152 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 153 */ }
/* 154 */
// Lazily aggregates on first call, then emits one joined (key, buffer) row
// per distinct key from the map iterator.
/* 155 */ protected void processNext() throws java.io.IOException {
/* 156 */ /*** PRODUCE: TungstenAggregate(key=[k#243L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#243L,count#251L]) */
/* 157 */
/* 158 */ if (!agg_initAgg) {
/* 159 */ agg_initAgg = true;
/* 160 */ agg_doAggregateWithKeys();
/* 161 */ }
/* 162 */
/* 163 */ // output the result
/* 164 */ while (agg_mapIter.next()) {
/* 165 */ agg_metricValue.add(1);
/* 166 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 167 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 168 */
/* 169 */ UnsafeRow agg_resultRow = agg_unsafeRowJoiner.join(agg_aggKey, agg_aggBuffer1);
/* 170 */ append(agg_resultRow);
/* 171 */
/* 172 */ if (shouldStop()) return;
/* 173 */ }
/* 174 */
/* 175 */ agg_mapIter.close();
// Free the map only when no spill happened; otherwise the sorter path cleans up.
/* 176 */ if (agg_sorter == null) {
/* 177 */ agg_hashMap.free();
/* 178 */ }
/* 179 */ }
/* 180 */ }
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#255L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#255L,count#258L])
/* 007 */ +- INPUT
/* 008 */ */
// Whole-stage-codegen output for the FINAL aggregation step: reads
// (key, partial count) rows from the upstream iterator (inputadapter_input),
// merges the partial counts per key in an UnsafeFixedWidthAggregationMap
// (spilling into an UnsafeKVExternalSorter when a buffer cannot be
// allocated), then streams the merged (key, count) rows to the parent.
/* 009 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 010 */ private Object[] references;
/* 011 */ private boolean agg_initAgg;
/* 012 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 013 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 014 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 015 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 016 */ private scala.collection.Iterator inputadapter_input;
/* 017 */ private UnsafeRow agg_result;
/* 018 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 019 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 020 */ private UnsafeRow agg_result1;
/* 021 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder1;
/* 022 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter1;
/* 023 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 024 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 025 */
// Only stashes the references array; all real wiring happens in init().
/* 026 */ public GeneratedIterator(Object[] references) {
/* 027 */ this.references = references;
/* 028 */ }
/* 029 */
// Per-partition setup: resolves the TungstenAggregate plan node (references[0])
// and the output-row metric (references[1]), creates the hash map, and builds
// the two UnsafeRow writers (1-column grouping key, 2-column output row).
/* 030 */ public void init(scala.collection.Iterator inputs[]) {
/* 031 */ agg_initAgg = false;
/* 032 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 033 */ agg_hashMap = agg_plan.createHashMap();
/* 034 */
/* 035 */ inputadapter_input = inputs[0];
/* 036 */ agg_result = new UnsafeRow(1);
/* 037 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 038 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 039 */ agg_result1 = new UnsafeRow(2);
/* 040 */ this.agg_holder1 = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result1, 0);
/* 041 */ this.agg_rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder1, 2);
/* 042 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 043 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 044 */ }
/* 045 */
// Drains the whole input: for each (key, partial count) row, looks up (or
// allocates) the per-key aggregation buffer and adds the partial count into
// it. When done, finishAggregate() exposes the merged state via agg_mapIter.
/* 046 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 047 */ /*** PRODUCE: INPUT */
/* 048 */
/* 049 */ while (!shouldStop() && inputadapter_input.hasNext()) {
/* 050 */ InternalRow inputadapter_row = (InternalRow) inputadapter_input.next();
/* 051 */ /*** CONSUME: TungstenAggregate(key=[k#255L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#255L,count#258L]) */
/* 052 */ /* input[0, bigint] */
/* 053 */ long agg_value = inputadapter_row.getLong(0);
/* 054 */ /* input[1, bigint] — the upstream partial count for this key */
/* 055 */ long agg_value1 = inputadapter_row.getLong(1);
/* 056 */
/* 057 */ // generate grouping key
/* 058 */ agg_rowWriter.write(0, agg_value);
/* 059 */ /* hash(input[0, bigint], 42) */
/* 060 */ int agg_value3 = 42;
/* 061 */
/* 062 */ agg_value3 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(agg_value, agg_value3);
/* 063 */ UnsafeRow agg_aggBuffer = null;
/* 064 */ if (true) {
/* 065 */ // try to get the buffer from hash map
/* 066 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 067 */ }
/* 068 */ if (agg_aggBuffer == null) {
// Allocation failed: spill the map's contents into the external sorter
// (creating it on first spill, merging on subsequent ones), then retry.
/* 069 */ if (agg_sorter == null) {
/* 070 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 071 */ } else {
/* 072 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 073 */ }
/* 074 */
/* 075 */ // the hash map has been spilled, so it should have enough memory now;
/* 076 */ // try to allocate the buffer again.
/* 077 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value3);
/* 078 */ if (agg_aggBuffer == null) {
/* 079 */ // failed to allocate the first page
/* 080 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 081 */ }
/* 082 */ }
/* 083 */
/* 084 */ // evaluate aggregate function: running count += partial count
/* 085 */ /* (input[0, bigint] + input[2, bigint]) */
/* 086 */ /* input[0, bigint] */
/* 087 */ long agg_value6 = agg_aggBuffer.getLong(0);
/* 088 */
/* 089 */ long agg_value5 = -1L;
/* 090 */ agg_value5 = agg_value6 + agg_value1;
/* 091 */ // update aggregate buffer
/* 092 */ agg_aggBuffer.setLong(0, agg_value5);
/* 093 */ }
/* 094 */
/* 095 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 096 */ }
/* 097 */
// Runs the aggregation once (lazily on first call), then streams one
// (key, count) output row per map entry to the parent via append(),
// yielding early whenever shouldStop() says the buffer is full.
/* 098 */ protected void processNext() throws java.io.IOException {
/* 099 */ /*** PRODUCE: TungstenAggregate(key=[k#255L], functions=[(count(1),mode=Final,isDistinct=false)], output=[k#255L,count#258L]) */
/* 100 */
/* 101 */ if (!agg_initAgg) {
/* 102 */ agg_initAgg = true;
/* 103 */ agg_doAggregateWithKeys();
/* 104 */ }
/* 105 */
/* 106 */ // output the result
/* 107 */ while (agg_mapIter.next()) {
/* 108 */ agg_metricValue.add(1);
/* 109 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 110 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 111 */
/* 112 */ /* input[0, bigint] */
/* 113 */ long agg_value8 = agg_aggKey.getLong(0);
/* 114 */ /* input[0, bigint] */
/* 115 */ long agg_value9 = agg_aggBuffer1.getLong(0);
/* 116 */
/* 117 */ agg_rowWriter1.write(0, agg_value8);
/* 118 */
/* 119 */ agg_rowWriter1.write(1, agg_value9);
/* 120 */ append(agg_result1);
/* 121 */
/* 122 */ if (shouldStop()) return;
/* 123 */ }
/* 124 */
/* 125 */ agg_mapIter.close();
// Only free the map when no spill happened; if a sorter exists the map was
// presumably already destructed by destructAndCreateExternalSorter() —
// NOTE(review): inferred from the method name, confirm against Spark source.
/* 126 */ if (agg_sorter == null) {
/* 127 */ agg_hashMap.free();
/* 128 */ }
/* 129 */ }
/* 130 */ }
// Entry point invoked by Spark's runtime compiler: constructs the compiled
// iterator, passing through the `references` array (plan nodes / metrics that
// the generated class later looks up by index in init()).
/* 001 */ public Object generate(Object[] references) {
/* 002 */ return new GeneratedIterator(references);
/* 003 */ }
/* 004 */
/* 005 */ /** Codegened pipeline for:
/* 006 */ * TungstenAggregate(key=[k#255L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#255L,count#263L])
/* 007 */ +- Project [(id#252L & 2) AS k#255L]
/* 008 */ +- Range 0, 1, 1, 20971520, [id#252L]
/* 009 */ */
// Whole-stage-codegen output for the PARTIAL aggregation step: generates ids
// from a Range operator, projects the grouping key k = id & 2, counts rows
// per key in an UnsafeFixedWidthAggregationMap (spilling into an
// UnsafeKVExternalSorter on allocation failure), and emits (key, count)
// partial rows assembled by an UnsafeRowJoiner.
/* 010 */ class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 011 */ private Object[] references;
/* 012 */ private boolean agg_initAgg;
/* 013 */ private org.apache.spark.sql.execution.aggregate.TungstenAggregate agg_plan;
/* 014 */ private org.apache.spark.sql.execution.UnsafeFixedWidthAggregationMap agg_hashMap;
/* 015 */ private org.apache.spark.sql.execution.UnsafeKVExternalSorter agg_sorter;
/* 016 */ private org.apache.spark.unsafe.KVIterator agg_mapIter;
/* 017 */ private org.apache.spark.sql.execution.metric.LongSQLMetric range_numOutputRows;
/* 018 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue range_metricValue;
/* 019 */ private boolean range_initRange;
/* 020 */ private long range_partitionEnd;
/* 021 */ private long range_number;
/* 022 */ private boolean range_overflow;
/* 023 */ private scala.collection.Iterator range_input;
/* 024 */ private UnsafeRow agg_result;
/* 025 */ private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
/* 026 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;
/* 027 */ private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowJoiner agg_unsafeRowJoiner;
/* 028 */ private org.apache.spark.sql.execution.metric.LongSQLMetric agg_numOutputRows;
/* 029 */ private org.apache.spark.sql.execution.metric.LongSQLMetricValue agg_metricValue;
/* 030 */
// Only stashes the references array; all real wiring happens in init().
/* 031 */ public GeneratedIterator(Object[] references) {
/* 032 */ this.references = references;
/* 033 */ }
/* 034 */
// Per-partition setup: resolves the TungstenAggregate plan (references[0]),
// the Range and aggregate output-row metrics (references[1], references[2]),
// creates the hash map, resets the Range cursor state, and builds the
// 1-column grouping-key writer and the key/buffer row joiner.
/* 035 */ public void init(scala.collection.Iterator inputs[]) {
/* 036 */ agg_initAgg = false;
/* 037 */ this.agg_plan = (org.apache.spark.sql.execution.aggregate.TungstenAggregate) references[0];
/* 038 */ agg_hashMap = agg_plan.createHashMap();
/* 039 */
/* 040 */ this.range_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[1];
/* 041 */ range_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) range_numOutputRows.localValue();
/* 042 */ range_initRange = false;
/* 043 */ range_partitionEnd = 0L;
/* 044 */ range_number = 0L;
/* 045 */ range_overflow = false;
/* 046 */ range_input = inputs[0];
/* 047 */ agg_result = new UnsafeRow(1);
/* 048 */ this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
/* 049 */ this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
/* 050 */ agg_unsafeRowJoiner = agg_plan.createUnsafeJoiner();
/* 051 */ this.agg_numOutputRows = (org.apache.spark.sql.execution.metric.LongSQLMetric) references[2];
/* 052 */ agg_metricValue = (org.apache.spark.sql.execution.metric.LongSQLMetricValue) agg_numOutputRows.localValue();
/* 053 */ }
/* 054 */
// Produces ids locally from [range_number, range_partitionEnd), projects the
// key id & 2, and increments that key's count in the aggregation map. On
// completion, finishAggregate() exposes the partial state via agg_mapIter.
/* 055 */ private void agg_doAggregateWithKeys() throws java.io.IOException {
/* 056 */ /*** PRODUCE: Project [(id#252L & 2) AS k#255L] */
/* 057 */
/* 058 */ /*** PRODUCE: Range 0, 1, 1, 20971520, [id#252L] */
/* 059 */
/* 060 */ // initialize Range: the input iterator only carries this task's
/* 061 */ // partition index, used to compute the partition's [start, end) slice
/* 062 */ if (!range_initRange) {
/* 063 */ range_initRange = true;
/* 064 */ if (range_input.hasNext()) {
/* 065 */ initRange(((InternalRow) range_input.next()).getInt(0));
/* 066 */ } else {
/* 067 */ return;
/* 068 */ }
/* 069 */ }
/* 070 */
/* 071 */ while (!range_overflow && range_number < range_partitionEnd && !shouldStop()) {
/* 072 */ long range_value = range_number;
/* 073 */ range_number += 1L;
// Overflow check. `<` binds tighter than `^`, so this parses as
// (range_number < range_value) ^ (1L < 0); with the positive step 1L the
// right side is false, leaving "did range_number wrap below range_value".
/* 074 */ if (range_number < range_value ^ 1L < 0) {
/* 075 */ range_overflow = true;
/* 076 */ }
/* 077 */
/* 078 */ /*** CONSUME: Project [(id#252L & 2) AS k#255L] */
/* 079 */
/* 080 */ /*** CONSUME: TungstenAggregate(key=[k#255L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#255L,count#263L]) */
/* 081 */ /* (input[0, bigint] & 2) */
/* 082 */ long project_value = -1L;
/* 083 */ project_value = range_value & 2L;
/* 084 */
/* 085 */ // generate grouping key
/* 086 */ agg_rowWriter.write(0, project_value);
/* 087 */ /* hash(input[0, bigint], 42) */
/* 088 */ int agg_value1 = 42;
/* 089 */
/* 090 */ agg_value1 = org.apache.spark.unsafe.hash.Murmur3_x86_32.hashLong(project_value, agg_value1);
/* 091 */ UnsafeRow agg_aggBuffer = null;
/* 092 */ if (true) {
/* 093 */ // try to get the buffer from hash map
/* 094 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 095 */ }
/* 096 */ if (agg_aggBuffer == null) {
// Allocation failed: spill the map's contents into the external sorter
// (creating it on first spill, merging on subsequent ones), then retry.
/* 097 */ if (agg_sorter == null) {
/* 098 */ agg_sorter = agg_hashMap.destructAndCreateExternalSorter();
/* 099 */ } else {
/* 100 */ agg_sorter.merge(agg_hashMap.destructAndCreateExternalSorter());
/* 101 */ }
/* 102 */
/* 103 */ // the hash map has been spilled, so it should have enough memory now;
/* 104 */ // try to allocate the buffer again.
/* 105 */ agg_aggBuffer = agg_hashMap.getAggregationBufferFromUnsafeRow(agg_result, agg_value1);
/* 106 */ if (agg_aggBuffer == null) {
/* 107 */ // failed to allocate the first page
/* 108 */ throw new OutOfMemoryError("No enough memory for aggregation");
/* 109 */ }
/* 110 */ }
/* 111 */
/* 112 */ // evaluate aggregate function: running count += 1 for this key
/* 113 */ /* (input[0, bigint] + 1) */
/* 114 */ /* input[0, bigint] */
/* 115 */ long agg_value4 = agg_aggBuffer.getLong(0);
/* 116 */
/* 117 */ long agg_value3 = -1L;
/* 118 */ agg_value3 = agg_value4 + 1L;
/* 119 */ // update aggregate buffer
/* 120 */ agg_aggBuffer.setLong(0, agg_value3);
/* 121 */
/* 122 */ }
/* 123 */
/* 124 */ agg_mapIter = agg_plan.finishAggregate(agg_hashMap, agg_sorter);
/* 125 */ }
/* 126 */
// Computes this partition's [range_number, range_partitionEnd) slice of
// Range(start=0, step=1, numElements=20971520, numSlices=1) for partition
// `idx`. BigInteger arithmetic avoids long overflow in the intermediate
// products; results are clamped to Long.MIN_VALUE/MAX_VALUE.
/* 127 */ private void initRange(int idx) {
/* 128 */ java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
/* 129 */ java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
/* 130 */ java.math.BigInteger numElement = java.math.BigInteger.valueOf(20971520L);
/* 131 */ java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
/* 132 */ java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
/* 133 */
/* 134 */ java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
/* 135 */ if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 136 */ range_number = Long.MAX_VALUE;
/* 137 */ } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 138 */ range_number = Long.MIN_VALUE;
/* 139 */ } else {
/* 140 */ range_number = st.longValue();
/* 141 */ }
/* 142 */
/* 143 */ java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
/* 144 */ .multiply(step).add(start);
/* 145 */ if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
/* 146 */ range_partitionEnd = Long.MAX_VALUE;
/* 147 */ } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
/* 148 */ range_partitionEnd = Long.MIN_VALUE;
/* 149 */ } else {
/* 150 */ range_partitionEnd = end.longValue();
/* 151 */ }
/* 152 */
/* 153 */ // record the number of rows this partition will produce (step = 1)
/* 154 */ range_metricValue.add((range_partitionEnd - range_number) / 1L);
/* 155 */ }
/* 156 */
// Runs the aggregation once (lazily on first call), then streams one joined
// (key, partial count) row per map entry to the parent via append(),
// yielding early whenever shouldStop() says the buffer is full.
/* 157 */ protected void processNext() throws java.io.IOException {
/* 158 */ /*** PRODUCE: TungstenAggregate(key=[k#255L], functions=[(count(1),mode=Partial,isDistinct=false)], output=[k#255L,count#263L]) */
/* 159 */
/* 160 */ if (!agg_initAgg) {
/* 161 */ agg_initAgg = true;
/* 162 */ agg_doAggregateWithKeys();
/* 163 */ }
/* 164 */
/* 165 */ // output the result
/* 166 */ while (agg_mapIter.next()) {
/* 167 */ agg_metricValue.add(1);
/* 168 */ UnsafeRow agg_aggKey = (UnsafeRow) agg_mapIter.getKey();
/* 169 */ UnsafeRow agg_aggBuffer1 = (UnsafeRow) agg_mapIter.getValue();
/* 170 */
// Concatenate the key row and buffer row into the 2-column output row.
/* 171 */ UnsafeRow agg_resultRow = agg_unsafeRowJoiner.join(agg_aggKey, agg_aggBuffer1);
/* 172 */ append(agg_resultRow);
/* 173 */
/* 174 */ if (shouldStop()) return;
/* 175 */ }
/* 176 */
/* 177 */ agg_mapIter.close();
// Only free the map when no spill happened; if a sorter exists the map was
// presumably already destructed by destructAndCreateExternalSorter() —
// NOTE(review): inferred from the method name, confirm against Spark source.
/* 178 */ if (agg_sorter == null) {
/* 179 */ agg_hashMap.free();
/* 180 */ }
/* 181 */ }
/* 182 */ }
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment