digraph G {
0 [labelType="html" label="<b>Execute InsertIntoHadoopFsRelationCommand</b><br><br>number of written files: 31<br>written output: 2.1 KiB<br>number of output rows: 31<br>number of dynamic part: 0"];
subgraph cluster1 {
isCluster="true";
label="WholeStageCodegen (7)\n \nduration: total (min, med, max (stageId: taskId))\n872 ms (20 ms, 27 ms, 53 ms (stage 41.0: task 2688))";
2 [labelType="html" label="<b>Sort</b><br><br>sort time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 41.0: task 2689))<br>peak memory total (min, med, max (stageId: taskId))<br>1985.9 MiB (64.1 MiB, 64.1 MiB, 64.1 MiB (stage 41.0: task 2689))<br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 41.0: task 2689))"];
}
3 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 31<br>shuffle write time total (min, med, max (stageId: taskId))<br>24 ms (0 ms, 0 ms, 4 ms (stage 40.0: task 2488))<br>records read: 31<br>local bytes read total (min, med, max (stageId: taskId))<br>1983.0 B (63.0 B, 64.0 B, 64.0 B (stage 41.0: task 2689))<br>fetch wait time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 41.0: task 2689))<br>local blocks read: 31<br>data size total (min, med, max (stageId: taskId))<br>744.0 B (0.0 B, 0.0 B, 48.0 B (stage 40.0: task 2492))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>1983.0 B (0.0 B, 0.0 B, 128.0 B (stage 40.0: task 2492))"];
subgraph cluster4 {
isCluster="true";
label="WholeStageCodegen (6)\n \nduration: total (min, med, max (stageId: taskId))\n451 ms (0 ms, 0 ms, 25 ms (stage 36.0: task 2414))";
5 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>297 ms (0 ms, 0 ms, 11 ms (stage 40.0: task 2488))<br>peak memory total (min, med, max (stageId: taskId))<br>3.6 GiB (256.0 KiB, 256.0 KiB, 64.3 MiB (stage 40.0: task 2489))<br>number of output rows: 62<br>avg hash probe bucket list iters (min, med, max (stageId: taskId)):<br>(1, 1, 1 (stage 40.0: task 2489))"];
}
6 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 6,200<br>shuffle write time total (min, med, max (stageId: taskId))<br>893 ms (3 ms, 4 ms, 8 ms (stage 35.0: task 2101))<br>records read: 12,400<br>local bytes read total (min, med, max (stageId: taskId))<br>357.7 KiB (0.0 B, 0.0 B, 8.4 KiB (stage 36.0: task 2305))<br>fetch wait time total (min, med, max (stageId: taskId))<br>31 ms (0 ms, 0 ms, 5 ms (stage 40.0: task 2491))<br>remote bytes read total (min, med, max (stageId: taskId))<br>352.1 KiB (0.0 B, 0.0 B, 8.4 KiB (stage 40.0: task 2505))<br>local blocks read: 5,636<br>remote blocks read: 5,564<br>data size total (min, med, max (stageId: taskId))<br>145.3 KiB (744.0 B, 744.0 B, 744.0 B (stage 35.0: task 2088))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>354.9 KiB (1810.0 B, 1817.0 B, 1826.0 B (stage 35.0: task 2096))"];
subgraph cluster7 {
isCluster="true";
label="WholeStageCodegen (5)\n \nduration: total (min, med, max (stageId: taskId))\n1.5 s (5 ms, 8 ms, 68 ms (stage 35.0: task 2089))";
8 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>285 ms (1 ms, 1 ms, 60 ms (stage 35.0: task 2089))<br>peak memory total (min, med, max (stageId: taskId))<br>50.0 MiB (256.0 KiB, 256.0 KiB, 256.0 KiB (stage 35.0: task 2088))<br>number of output rows: 6,200"];
9 [labelType="html" label="<br><b>Project</b><br><br>"];
10 [labelType="html" label="<b>SortMergeJoin</b><br><br>number of output rows: 72,118"];
}
subgraph cluster11 {
isCluster="true";
label="WholeStageCodegen (2)\n \nduration: total (min, med, max (stageId: taskId))\n558 ms (0 ms, 3 ms, 64 ms (stage 35.0: task 2089))";
12 [labelType="html" label="<b>Sort</b><br><br>sort time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 35.0: task 2088))<br>peak memory total (min, med, max (stageId: taskId))<br>12.5 GiB (64.1 MiB, 64.1 MiB, 64.1 MiB (stage 35.0: task 2088))<br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 35.0: task 2088))"];
}
13 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 156,095<br>shuffle write time total (min, med, max (stageId: taskId))<br>42 ms (0 ms, 0 ms, 26 ms (stage 34.0: task 2086))<br>records read: 156,095<br>local bytes read total (min, med, max (stageId: taskId))<br>4.9 MiB (16.0 KiB, 19.9 KiB, 36.5 KiB (stage 35.0: task 2220))<br>fetch wait time total (min, med, max (stageId: taskId))<br>8 ms (0 ms, 0 ms, 7 ms (stage 35.0: task 2239))<br>remote bytes read total (min, med, max (stageId: taskId))<br>5.2 MiB (16.3 KiB, 31.2 KiB, 37.7 KiB (stage 35.0: task 2160))<br>local blocks read: 200<br>remote blocks read: 200<br>data size total (min, med, max (stageId: taskId))<br>14.3 MiB (0.0 B, 0.0 B, 9.2 MiB (stage 34.0: task 2086))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>10.0 MiB (0.0 B, 0.0 B, 6.4 MiB (stage 34.0: task 2086))"];
subgraph cluster14 {
isCluster="true";
label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n8.7 s (134 ms, 2.1 s, 2.2 s (stage 34.0: task 2083))";
15 [labelType="html" label="<br><b>Project</b><br><br>"];
16 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 156,095"];
}
17 [labelType="html" label="<b>Scan csv </b><br><br>number of files read: 1<br>metadata time: 0 ms<br>size of files read: 519.0 MiB<br>number of output rows: 500,001"];
subgraph cluster18 {
isCluster="true";
label="WholeStageCodegen (4)\n \nduration: total (min, med, max (stageId: taskId))\n539 ms (0 ms, 3 ms, 61 ms (stage 35.0: task 2089))";
19 [labelType="html" label="<b>Sort</b><br><br>sort time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 35.0: task 2088))<br>peak memory total (min, med, max (stageId: taskId))<br>12.5 GiB (64.1 MiB, 64.1 MiB, 64.1 MiB (stage 35.0: task 2088))<br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 35.0: task 2088))"];
}
20 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 171,282<br>shuffle write time total (min, med, max (stageId: taskId))<br>50 ms (23 ms, 26 ms, 26 ms (stage 33.0: task 2081))<br>records read: 171,282<br>local bytes read total (min, med, max (stageId: taskId))<br>5.7 MiB (24.5 KiB, 28.9 KiB, 34.6 KiB (stage 35.0: task 2197))<br>fetch wait time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 35.0: task 2088))<br>remote bytes read total (min, med, max (stageId: taskId))<br>5.7 MiB (24.4 KiB, 29.2 KiB, 34.1 KiB (stage 35.0: task 2177))<br>local blocks read: 200<br>remote blocks read: 200<br>data size total (min, med, max (stageId: taskId))<br>17.0 MiB (8.2 MiB, 8.8 MiB, 8.8 MiB (stage 33.0: task 2081))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>11.4 MiB (5.5 MiB, 6.0 MiB, 6.0 MiB (stage 33.0: task 2081))"];
subgraph cluster21 {
isCluster="true";
label="WholeStageCodegen (3)\n \nduration: total (min, med, max (stageId: taskId))\n3.0 s (1.4 s, 1.6 s, 1.6 s (stage 33.0: task 2081))";
22 [labelType="html" label="<br><b>Project</b><br><br>"];
23 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 171,282"];
}
24 [labelType="html" label="<b>Scan csv </b><br><br>number of files read: 1<br>metadata time: 0 ms<br>size of files read: 173.6 MiB<br>number of output rows: 171,282"];
2->0;
3->2;
5->3;
6->5;
8->6;
9->8;
10->9;
12->10;
13->12;
15->13;
16->15;
17->16;
19->10;
20->19;
22->20;
23->22;
24->23;
}
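The digraph above is the dagre-d3 style plan graph exported from the Spark UI SQL page. As a minimal sketch (assuming the graph text is saved to a file named plan_graph.dot, a hypothetical name), the operator DAG can be recovered from it with a couple of regular expressions that rely only on the node and edge syntax visible above:

import re

with open("plan_graph.dot") as f:   # hypothetical path holding the digraph text above
    dot = f.read()

# Node lines look like:  2 [labelType="html" label="<b>Sort</b>..."];
nodes = {m.group(1): m.group(2)
         for m in re.finditer(r'(\d+) \[labelType="html" label="(?:<br>)?<b>(.+?)</b>', dot)}

# Edge lines look like:  2->0;
edges = re.findall(r'(\d+)->(\d+);', dot)

# Print each edge as "child operator -> parent operator",
# e.g. "Sort -> Execute InsertIntoHadoopFsRelationCommand".
for src, dst in edges:
    print(f"{nodes.get(src, src):<20} -> {nodes.get(dst, dst)}")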
Execute InsertIntoHadoopFsRelationCommand file:/opt/spark/work-dir/visualisations/oct_transaction_fees.csv, false, CSV, Map(header -> true, path -> ./visualisations/oct_transaction_fees.csv), ErrorIfExists, [formatted_date, total_transaction_fee]
Sort [formatted_date#114 ASC NULLS FIRST], true, 0
WholeStageCodegen (7)
Exchange rangepartitioning(formatted_date#114 ASC NULLS FIRST, 200), true, [id=#483]
HashAggregate(keys=[formatted_date#114], functions=[sum((cast(gas#78 as bigint) * gas_price#79L))])
WholeStageCodegen (6)
Exchange hashpartitioning(formatted_date#114, 200), true, [id=#479]
HashAggregate(keys=[formatted_date#114], functions=[partial_sum((cast(gas#78 as bigint) * gas_price#79L))])
Project [formatted_date#114, gas#78, gas_price#79L]
SortMergeJoin [hash#17], [block_hash#72], Inner
WholeStageCodegen (5)
Sort [hash#17 ASC NULLS FIRST], false, 0
WholeStageCodegen (2)
Exchange hashpartitioning(hash#17, 200), true, [id=#461]
Project [hash#17, cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) AS formatted_date#114]
Filter (((cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) < 16740) AND (cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) > 16708)) AND isnotnull(hash#17))
WholeStageCodegen (1)
FileScan csv [hash#17,timestamp#32] Batched: false, DataFilters: [(cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) < 167..., Format: CSV, Location: InMemoryFileIndex[s3a://data-repository-bkt/ECS765/ethereum/blocks.csv], PartitionFilters: [], PushedFilters: [IsNotNull(hash)], ReadSchema: struct<hash:string,timestamp:int>
Sort [block_hash#72 ASC NULLS FIRST], false, 0
WholeStageCodegen (4)
Exchange hashpartitioning(block_hash#72, 200), true, [id=#470]
Project [block_hash#72, gas#78, gas_price#79L]
Filter ((isnotnull(transaction_index#74) AND (transaction_index#74 = 0)) AND isnotnull(block_hash#72))
WholeStageCodegen (3)
FileScan csv [block_hash#72,transaction_index#74,gas#78,gas_price#79L] Batched: false, DataFilters: [isnotnull(transaction_index#74), (transaction_index#74 = 0), isnotnull(block_hash#72)], Format: CSV, Location: InMemoryFileIndex[s3a://data-repository-bkt/ECS765/ethereum/transactions.csv], PartitionFilters: [], PushedFilters: [IsNotNull(transaction_index), EqualTo(transaction_index,0), IsNotNull(block_hash)], ReadSchema: struct<block_hash:string,transaction_index:int,gas:int,gas_price:bigint>
== Parsed Logical Plan ==
InsertIntoHadoopFsRelationCommand file:/opt/spark/work-dir/visualisations/oct_transaction_fees.csv, false, CSV, Map(header -> true, path -> ./visualisations/oct_transaction_fees.csv), ErrorIfExists, [formatted_date, total_transaction_fee]
+- Sort [formatted_date#114 ASC NULLS FIRST], true
+- Aggregate [formatted_date#114], [formatted_date#114, sum((cast(gas#78 as bigint) * gas_price#79L)) AS total_transaction_fee#427L]
+- Filter (transaction_index#74 = 0)
+- Filter ((formatted_date#114 < cast(2015-11-01 as date)) AND (formatted_date#114 > cast(2015-09-30 as date)))
+- Project [number#16, parent_hash#18, nonce#19, sha3_uncles#20, logs_bloom#21, transactions_root#22, state_root#23, receipts_root#24, miner#25, difficulty#26L, total_difficulty#27, size#28, extra_data#29, gas_limit#30, gas_used#31, timestamp#32, transaction_count#33, base_fee_per_gas#34, formatted_date#114, hash#70, nonce#71, block_hash#72, block_number#73, transaction_index#74, ... 10 more fields]
+- Join Inner, (hash#17 = block_hash#72)
:- Project [number#16, hash#17, parent_hash#18, nonce#19, sha3_uncles#20, logs_bloom#21, transactions_root#22, state_root#23, receipts_root#24, miner#25, difficulty#26L, total_difficulty#27, size#28, extra_data#29, gas_limit#30, gas_used#31, timestamp#32, transaction_count#33, base_fee_per_gas#34, to_date(from_unixtime('timestamp, yyyy-MM-dd HH:mm:ss, None), None) AS formatted_date#114]
: +- Relation[number#16,hash#17,parent_hash#18,nonce#19,sha3_uncles#20,logs_bloom#21,transactions_root#22,state_root#23,receipts_root#24,miner#25,difficulty#26L,total_difficulty#27,size#28,extra_data#29,gas_limit#30,gas_used#31,timestamp#32,transaction_count#33,base_fee_per_gas#34] csv
+- Relation[hash#70,nonce#71,block_hash#72,block_number#73,transaction_index#74,from_address#75,to_address#76,value#77,gas#78,gas_price#79L,input#80,block_timestamp#81,max_fee_per_gas#82,max_priority_fee_per_gas#83,transaction_type#84] csv
== Analyzed Logical Plan ==
InsertIntoHadoopFsRelationCommand file:/opt/spark/work-dir/visualisations/oct_transaction_fees.csv, false, CSV, Map(header -> true, path -> ./visualisations/oct_transaction_fees.csv), ErrorIfExists, [formatted_date, total_transaction_fee]
+- Sort [formatted_date#114 ASC NULLS FIRST], true
+- Aggregate [formatted_date#114], [formatted_date#114, sum((cast(gas#78 as bigint) * gas_price#79L)) AS total_transaction_fee#427L]
+- Filter (transaction_index#74 = 0)
+- Filter ((formatted_date#114 < cast(2015-11-01 as date)) AND (formatted_date#114 > cast(2015-09-30 as date)))
+- Project [number#16, parent_hash#18, nonce#19, sha3_uncles#20, logs_bloom#21, transactions_root#22, state_root#23, receipts_root#24, miner#25, difficulty#26L, total_difficulty#27, size#28, extra_data#29, gas_limit#30, gas_used#31, timestamp#32, transaction_count#33, base_fee_per_gas#34, formatted_date#114, hash#70, nonce#71, block_hash#72, block_number#73, transaction_index#74, ... 10 more fields]
+- Join Inner, (hash#17 = block_hash#72)
:- Project [number#16, hash#17, parent_hash#18, nonce#19, sha3_uncles#20, logs_bloom#21, transactions_root#22, state_root#23, receipts_root#24, miner#25, difficulty#26L, total_difficulty#27, size#28, extra_data#29, gas_limit#30, gas_used#31, timestamp#32, transaction_count#33, base_fee_per_gas#34, to_date(from_unixtime('timestamp, yyyy-MM-dd HH:mm:ss, None), None) AS formatted_date#114]
: +- Relation[number#16,hash#17,parent_hash#18,nonce#19,sha3_uncles#20,logs_bloom#21,transactions_root#22,state_root#23,receipts_root#24,miner#25,difficulty#26L,total_difficulty#27,size#28,extra_data#29,gas_limit#30,gas_used#31,timestamp#32,transaction_count#33,base_fee_per_gas#34] csv
+- Relation[hash#70,nonce#71,block_hash#72,block_number#73,transaction_index#74,from_address#75,to_address#76,value#77,gas#78,gas_price#79L,input#80,block_timestamp#81,max_fee_per_gas#82,max_priority_fee_per_gas#83,transaction_type#84] csv
== Optimized Logical Plan ==
InsertIntoHadoopFsRelationCommand file:/opt/spark/work-dir/visualisations/oct_transaction_fees.csv, false, CSV, Map(header -> true, path -> ./visualisations/oct_transaction_fees.csv), ErrorIfExists, [formatted_date, total_transaction_fee]
+- Sort [formatted_date#114 ASC NULLS FIRST], true
+- Aggregate [formatted_date#114], [formatted_date#114, sum((cast(gas#78 as bigint) * gas_price#79L)) AS total_transaction_fee#427L]
+- Project [formatted_date#114, gas#78, gas_price#79L]
+- Join Inner, (hash#17 = block_hash#72)
:- Project [hash#17, cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) AS formatted_date#114]
: +- Filter (((cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) < 16740) AND (cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) > 16708)) AND isnotnull(hash#17))
: +- Relation[number#16,hash#17,parent_hash#18,nonce#19,sha3_uncles#20,logs_bloom#21,transactions_root#22,state_root#23,receipts_root#24,miner#25,difficulty#26L,total_difficulty#27,size#28,extra_data#29,gas_limit#30,gas_used#31,timestamp#32,transaction_count#33,base_fee_per_gas#34] csv
+- Project [block_hash#72, gas#78, gas_price#79L]
+- Filter ((isnotnull(transaction_index#74) AND (transaction_index#74 = 0)) AND isnotnull(block_hash#72))
+- Relation[hash#70,nonce#71,block_hash#72,block_number#73,transaction_index#74,from_address#75,to_address#76,value#77,gas#78,gas_price#79L,input#80,block_timestamp#81,max_fee_per_gas#82,max_priority_fee_per_gas#83,transaction_type#84] csv
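The optimized plan above compares formatted_date against the integer literals 16740 and 16708, which are dates encoded as days since the Unix epoch. A quick check (a standalone sketch, not part of the job itself):

from datetime import date, timedelta

epoch = date(1970, 1, 1)
print(epoch + timedelta(days=16740))  # 2015-11-01, the upper bound
print(epoch + timedelta(days=16708))  # 2015-09-30, the lower bound

So the pushed-down filter keeps exactly the days of October 2015, matching the (2015-09-30, 2015-11-01) window in the parsed and analyzed plans.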
== Physical Plan ==
Execute InsertIntoHadoopFsRelationCommand file:/opt/spark/work-dir/visualisations/oct_transaction_fees.csv, false, CSV, Map(header -> true, path -> ./visualisations/oct_transaction_fees.csv), ErrorIfExists, [formatted_date, total_transaction_fee]
+- *(7) Sort [formatted_date#114 ASC NULLS FIRST], true, 0
+- Exchange rangepartitioning(formatted_date#114 ASC NULLS FIRST, 200), true, [id=#483]
+- *(6) HashAggregate(keys=[formatted_date#114], functions=[sum((cast(gas#78 as bigint) * gas_price#79L))], output=[formatted_date#114, total_transaction_fee#427L])
+- Exchange hashpartitioning(formatted_date#114, 200), true, [id=#479]
+- *(5) HashAggregate(keys=[formatted_date#114], functions=[partial_sum((cast(gas#78 as bigint) * gas_price#79L))], output=[formatted_date#114, sum#437L])
+- *(5) Project [formatted_date#114, gas#78, gas_price#79L]
+- *(5) SortMergeJoin [hash#17], [block_hash#72], Inner
:- *(2) Sort [hash#17 ASC NULLS FIRST], false, 0
: +- Exchange hashpartitioning(hash#17, 200), true, [id=#461]
: +- *(1) Project [hash#17, cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) AS formatted_date#114]
: +- *(1) Filter (((cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) < 16740) AND (cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) > 16708)) AND isnotnull(hash#17))
: +- FileScan csv [hash#17,timestamp#32] Batched: false, DataFilters: [(cast(from_unixtime(cast(timestamp#32 as bigint), yyyy-MM-dd HH:mm:ss, Some(GMT)) as date) < 167..., Format: CSV, Location: InMemoryFileIndex[s3a://data-repository-bkt/ECS765/ethereum/blocks.csv], PartitionFilters: [], PushedFilters: [IsNotNull(hash)], ReadSchema: struct<hash:string,timestamp:int>
+- *(4) Sort [block_hash#72 ASC NULLS FIRST], false, 0
+- Exchange hashpartitioning(block_hash#72, 200), true, [id=#470]
+- *(3) Project [block_hash#72, gas#78, gas_price#79L]
+- *(3) Filter ((isnotnull(transaction_index#74) AND (transaction_index#74 = 0)) AND isnotnull(block_hash#72))
+- FileScan csv [block_hash#72,transaction_index#74,gas#78,gas_price#79L] Batched: false, DataFilters: [isnotnull(transaction_index#74), (transaction_index#74 = 0), isnotnull(block_hash#72)], Format: CSV, Location: InMemoryFileIndex[s3a://data-repository-bkt/ECS765/ethereum/transactions.csv], PartitionFilters: [], PushedFilters: [IsNotNull(transaction_index), EqualTo(transaction_index,0), IsNotNull(block_hash)], ReadSchema: struct<block_hash:string,transaction_index:int,gas:int,gas_price:bigint>
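The plans are consistent with a PySpark job along the following lines. This is a hedged reconstruction inferred from the plan text (the S3 paths, column names, October 2015 window, aggregate, and output path are taken from the plans; the DataFrame names, header/inferSchema options, and code layout are assumptions), not the original source:

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, from_unixtime, to_date, sum as sum_

spark = SparkSession.builder.appName("oct_transaction_fees").getOrCreate()

# Both scans in the physical plan are un-batched CSV reads; header/inferSchema are assumed here.
blocks = spark.read.csv("s3a://data-repository-bkt/ECS765/ethereum/blocks.csv",
                        header=True, inferSchema=True)
transactions = spark.read.csv("s3a://data-repository-bkt/ECS765/ethereum/transactions.csv",
                              header=True, inferSchema=True)

# to_date(from_unixtime('timestamp)) AS formatted_date, as in the parsed logical plan.
blocks = blocks.withColumn("formatted_date", to_date(from_unixtime(col("timestamp"))))

fees = (blocks
        .join(transactions, blocks.hash == transactions.block_hash, "inner")
        # October 2015 window and the transaction_index = 0 filter, in the order
        # they appear in the parsed logical plan (the optimizer pushes both below the join).
        .filter((col("formatted_date") > "2015-09-30") & (col("formatted_date") < "2015-11-01"))
        .filter(col("transaction_index") == 0)
        # sum(gas * gas_price) AS total_transaction_fee, grouped and sorted by date.
        .groupBy("formatted_date")
        .agg(sum_(col("gas") * col("gas_price")).alias("total_transaction_fee"))
        .orderBy("formatted_date"))

# ErrorIfExists is Spark's default save mode, matching the InsertIntoHadoopFsRelationCommand above.
fees.write.csv("./visualisations/oct_transaction_fees.csv", header=True)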