digraph G {
0 [labelType="html" label="<br><b>WriteToDataSourceV2</b><br><br>"];
subgraph cluster1 {
isCluster="true";
label="WholeStageCodegen (6)\n \nduration: total (min, med, max (stageId: taskId))\n300 ms (0 ms, 1 ms, 22 ms (stage 7.0: task 685))";
2 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>20 ms (0 ms, 0 ms, 20 ms (stage 7.0: task 685))<br>peak memory total (min, med, max (stageId: taskId))<br>178.0 MiB (256.0 KiB, 256.0 KiB, 64.3 MiB (stage 7.0: task 685))<br>number of output rows: 2<br>avg hash probe bucket list iters (min, med, max (stageId: taskId)):<br>(1, 1, 1 (stage 7.0: task 685))"];
}
3 [labelType="html" label="<b>StateStoreSave</b><br><br>number of total state rows: 2<br>memory used by state total (min, med, max (stageId: taskId))<br>78.6 KiB (400.0 B, 400.0 B, 696.0 B (stage 7.0: task 689))<br>count of cache hit on states cache in provider: 1,200<br>number of output rows: 2<br>estimated size of state only on current version total (min, med, max (stageId: taskId))<br>17.6 KiB (88.0 B, 88.0 B, 288.0 B (stage 7.0: task 685))<br>time to commit changes total (min, med, max (stageId: taskId))<br>11.6 s (42 ms, 54 ms, 271 ms (stage 7.0: task 789))<br>time to remove total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 7.0: task 608))<br>number of updated state rows: 2<br>time to update total (min, med, max (stageId: taskId))<br>475 ms (1 ms, 1 ms, 66 ms (stage 7.0: task 685))"];
subgraph cluster4 {
isCluster="true";
label="WholeStageCodegen (5)\n \nduration: total (min, med, max (stageId: taskId))\n476 ms (1 ms, 1 ms, 66 ms (stage 7.0: task 685))";
5 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 7.0: task 608))<br>peak memory total (min, med, max (stageId: taskId))<br>50.0 MiB (256.0 KiB, 256.0 KiB, 256.0 KiB (stage 7.0: task 608))<br>number of output rows: 2"];
}
6 [labelType="html" label="<b>StateStoreRestore</b><br><br>number of output rows: 2"];
subgraph cluster7 {
isCluster="true";
label="WholeStageCodegen (4)\n \nduration: total (min, med, max (stageId: taskId))\n1.7 s (3 ms, 6 ms, 225 ms (stage 7.0: task 685))";
8 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>188 ms (0 ms, 0 ms, 188 ms (stage 7.0: task 685))<br>peak memory total (min, med, max (stageId: taskId))<br>50.0 MiB (256.0 KiB, 256.0 KiB, 256.0 KiB (stage 7.0: task 608))<br>number of output rows: 2"];
}
9 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 4<br>shuffle write time total (min, med, max (stageId: taskId))<br>13 ms (5 ms, 8 ms, 8 ms (stage 6.0: task 606))<br>records read: 4<br>local bytes read total (min, med, max (stageId: taskId))<br>168.0 B (0.0 B, 0.0 B, 88.0 B (stage 7.0: task 685))<br>fetch wait time total (min, med, max (stageId: taskId))<br>177 ms (0 ms, 0 ms, 177 ms (stage 7.0: task 685))<br>remote bytes read total (min, med, max (stageId: taskId))<br>168.0 B (0.0 B, 0.0 B, 88.0 B (stage 7.0: task 685))<br>local blocks read: 2<br>remote blocks read: 2<br>data size total (min, med, max (stageId: taskId))<br>176.0 B (88.0 B, 88.0 B, 88.0 B (stage 6.0: task 607))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>336.0 B (168.0 B, 168.0 B, 168.0 B (stage 6.0: task 607))"];
subgraph cluster10 {
isCluster="true";
label="WholeStageCodegen (3)\n \nduration: total (min, med, max (stageId: taskId))\n19 ms (9 ms, 10 ms, 10 ms (stage 6.0: task 606))";
11 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>2 ms (1 ms, 1 ms, 1 ms (stage 6.0: task 607))<br>peak memory total (min, med, max (stageId: taskId))<br>512.0 KiB (256.0 KiB, 256.0 KiB, 256.0 KiB (stage 6.0: task 607))<br>number of output rows: 4"];
12 [labelType="html" label="<br><b>Project</b><br><br>"];
}
13 [labelType="html" label="<br><b>EventTimeWatermark</b><br><br>"];
subgraph cluster14 {
isCluster="true";
label="WholeStageCodegen (2)\n \nduration: total (min, med, max (stageId: taskId))\n10 ms (5 ms, 5 ms, 5 ms (stage 6.0: task 607))";
15 [labelType="html" label="<br><b>Project</b><br><br>"];
16 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 10"];
}
17 [labelType="html" label="<b>Generate</b><br><br>number of output rows: 10"];
subgraph cluster18 {
isCluster="true";
label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n15 ms (7 ms, 8 ms, 8 ms (stage 6.0: task 606))";
19 [labelType="html" label="<br><b>Project</b><br><br>"];
}
20 [labelType="html" label="<b>MicroBatchScan</b><br><br>number of output rows: 10"];
2->0;
3->2;
5->3;
6->5;
8->6;
9->8;
11->9;
12->11;
13->12;
15->13;
16->15;
17->16;
19->17;
20->19;
}
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1030a195
HashAggregate(keys=[hostname#12], functions=[count(1)])
WholeStageCodegen (6)
StateStoreSave [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 3, numPartitions = 200], Complete, 1733343806025, 2
HashAggregate(keys=[hostname#12], functions=[merge_count(1)])
WholeStageCodegen (5)
StateStoreRestore [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 3, numPartitions = 200], 2
HashAggregate(keys=[hostname#12], functions=[merge_count(1)])
WholeStageCodegen (4)
Exchange hashpartitioning(hostname#12, 200), true, [id=#340]
HashAggregate(keys=[hostname#12], functions=[partial_count(1)])
Project [split(logs#5, ,, -1)[1] AS hostname#12]
WholeStageCodegen (3)
EventTimeWatermark timestamp#1: timestamp, 3 seconds
Project [logs#5, timestamp#1]
Filter ((split(logs#5, ,, -1)[3] = GET) AND (split(logs#5, ,, -1)[5] = 200))
WholeStageCodegen (2)
Generate explode(split(value#0, , -1)), [timestamp#1], false, [logs#5]
Project [value#0, timestamp#1]
WholeStageCodegen (1)
MicroBatchScan[value#0, timestamp#1] class org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1
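For orientation, below is a minimal sketch of a Structured Streaming query that would produce plans of this shape, reconstructed from the operators above. The socket host and port come from the StreamingDataSourceV2Relation, and the column names, the 3-second watermark, the GET/200 filter and the Complete output mode are taken directly from the plan; the sink format, the delimiter used to break each socket payload into individual log lines, and the trigger are assumptions the plan does not pin down.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{explode, split}

// Sketch only: console sink and newline-delimited records inside each socket
// payload are assumptions; everything else mirrors the plan above.
val spark = SparkSession.builder().appName("access-log-counts").getOrCreate()
import spark.implicits._

val raw = spark.readStream
  .format("socket")
  .option("host", "stream-emulator.data-science-tools.svc.cluster.local")
  .option("port", 5551)
  .option("includeTimestamp", true)   // yields the value#0 / timestamp#1 columns
  .load()

val fields = raw
  .select(explode(split($"value", "\n")).as("logs"), $"timestamp")   // Generate explode(split(value, ...))
  .withWatermark("timestamp", "3 seconds")                           // EventTimeWatermark timestamp, 3 seconds
  .withColumn("idx",          split($"logs", ",")(0))
  .withColumn("hostname",     split($"logs", ",")(1))
  .withColumn("time",         split($"logs", ",")(2))
  .withColumn("method",       split($"logs", ",")(3))
  .withColumn("url",          split($"logs", ",")(4))
  .withColumn("responsecode", split($"logs", ",")(5))
  .withColumn("bytes",        split($"logs", ",")(6))

val counts = fields
  .filter($"method" === "GET" && $"responsecode" === "200")   // Filter ((method = GET) AND (responsecode = 200))
  .groupBy($"hostname")
  .count()
  .withColumnRenamed("count", "correct_count")                // count#67L AS correct_count#70L

val query = counts.writeStream
  .outputMode("complete")   // matches Complete in StateStoreSave; no explicit checkpoint,
                            // consistent with the /tmp/temporary-... state path above
  .format("console")        // assumed sink; the plan only shows a generic MicroBatchWrite
  .start()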
== Parsed Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1030a195
+- Project [hostname#12, count#67L AS correct_count#70L]
+- Aggregate [hostname#12], [hostname#12, count(1) AS count#67L]
+- Filter ((method#23 = GET) AND (responsecode#38 = 200))
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, responsecode#38, split(logs#5, ,, -1)[6] AS bytes#47]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, split(logs#5, ,, -1)[5] AS responsecode#38]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, split(logs#5, ,, -1)[4] AS url#30]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, split(logs#5, ,, -1)[3] AS method#23]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, split(logs#5, ,, -1)[2] AS time#17]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, split(logs#5, ,, -1)[1] AS hostname#12]
+- Project [logs#5, timestamp#1-T3000ms, split(logs#5, ,, -1)[0] AS idx#8]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@484fdb6, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 26, 36
== Analyzed Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1030a195
+- Project [hostname#12, count#67L AS correct_count#70L]
+- Aggregate [hostname#12], [hostname#12, count(1) AS count#67L]
+- Filter ((method#23 = GET) AND (responsecode#38 = 200))
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, responsecode#38, split(logs#5, ,, -1)[6] AS bytes#47]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, split(logs#5, ,, -1)[5] AS responsecode#38]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, split(logs#5, ,, -1)[4] AS url#30]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, split(logs#5, ,, -1)[3] AS method#23]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, split(logs#5, ,, -1)[2] AS time#17]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, split(logs#5, ,, -1)[1] AS hostname#12]
+- Project [logs#5, timestamp#1-T3000ms, split(logs#5, ,, -1)[0] AS idx#8]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@484fdb6, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 26, 36
== Optimized Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1030a195
+- Aggregate [hostname#12], [hostname#12, count(1) AS correct_count#70L]
+- Project [split(logs#5, ,, -1)[1] AS hostname#12]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- Project [logs#5, timestamp#1]
+- Filter ((split(logs#5, ,, -1)[3] = GET) AND (split(logs#5, ,, -1)[5] = 200))
+- Generate explode(split(value#0, , -1)), [0], false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@484fdb6, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 26, 36
== Physical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1030a195
+- *(6) HashAggregate(keys=[hostname#12], functions=[count(1)], output=[hostname#12, correct_count#70L])
+- StateStoreSave [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 3, numPartitions = 200], Complete, 1733343806025, 2
+- *(5) HashAggregate(keys=[hostname#12], functions=[merge_count(1)], output=[hostname#12, count#76L])
+- StateStoreRestore [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 3, numPartitions = 200], 2
+- *(4) HashAggregate(keys=[hostname#12], functions=[merge_count(1)], output=[hostname#12, count#76L])
+- Exchange hashpartitioning(hostname#12, 200), true, [id=#340]
+- *(3) HashAggregate(keys=[hostname#12], functions=[partial_count(1)], output=[hostname#12, count#76L])
+- *(3) Project [split(logs#5, ,, -1)[1] AS hostname#12]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- *(2) Project [logs#5, timestamp#1]
+- *(2) Filter ((split(logs#5, ,, -1)[3] = GET) AND (split(logs#5, ,, -1)[5] = 200))
+- Generate explode(split(value#0, , -1)), [timestamp#1], false, [logs#5]
+- *(1) Project [value#0, timestamp#1]
+- MicroBatchScan[value#0, timestamp#1] class org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1
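The four textual plan sections above can be reproduced from the driver once the query has processed at least one micro-batch; the DOT source at the top of this section is the graph the SQL tab of the Spark UI renders for the same execution. A minimal sketch, assuming `query` is the StreamingQuery handle from the earlier snippet:

// Prints the parsed, analyzed, optimized and physical plans
// for the most recent micro-batch of the running query.
query.explain(extended = true)

// The batch equivalent for a plain DataFrame would be:
// counts.explain(true)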