digraph G {
0 [labelType="html" label="<br><b>WriteToDataSourceV2</b><br><br>"];
subgraph cluster1 {
isCluster="true";
label="WholeStageCodegen (6)\n \nduration: total (min, med, max (stageId: taskId))\n249 ms (0 ms, 1 ms, 4 ms (stage 9.0: task 860))";
2 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>2 ms (0 ms, 0 ms, 1 ms (stage 9.0: task 860))<br>peak memory total (min, med, max (stageId: taskId))<br>242.0 MiB (256.0 KiB, 256.0 KiB, 64.3 MiB (stage 9.0: task 860))<br>number of output rows: 3<br>avg hash probe bucket list iters (min, med, max (stageId: taskId)):<br>(1, 1, 1 (stage 9.0: task 860))"];
}
3 [labelType="html" label="<b>StateStoreSave</b><br><br>number of total state rows: 3<br>memory used by state total (min, med, max (stageId: taskId))<br>78.8 KiB (400.0 B, 400.0 B, 704.0 B (stage 9.0: task 885))<br>count of cache hit on states cache in provider: 1,600<br>number of output rows: 3<br>estimated size of state only on current version total (min, med, max (stageId: taskId))<br>17.8 KiB (88.0 B, 88.0 B, 288.0 B (stage 9.0: task 885))<br>time to commit changes total (min, med, max (stageId: taskId))<br>11.5 s (37 ms, 55 ms, 168 ms (stage 9.0: task 913))<br>time to remove total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 9.0: task 811))<br>number of updated state rows: 2<br>time to update total (min, med, max (stageId: taskId))<br>370 ms (1 ms, 1 ms, 18 ms (stage 9.0: task 885))"];
subgraph cluster4 {
isCluster="true";
label="WholeStageCodegen (5)\n \nduration: total (min, med, max (stageId: taskId))\n371 ms (1 ms, 1 ms, 18 ms (stage 9.0: task 885))";
5 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>1 ms (0 ms, 0 ms, 1 ms (stage 9.0: task 885))<br>peak memory total (min, med, max (stageId: taskId))<br>50.0 MiB (256.0 KiB, 256.0 KiB, 256.0 KiB (stage 9.0: task 811))<br>number of output rows: 2"];
}
6 [labelType="html" label="<b>StateStoreRestore</b><br><br>number of output rows: 2"];
subgraph cluster7 {
isCluster="true";
label="WholeStageCodegen (4)\n \nduration: total (min, med, max (stageId: taskId))\n1.3 s (3 ms, 6 ms, 24 ms (stage 9.0: task 856))";
8 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 9.0: task 811))<br>peak memory total (min, med, max (stageId: taskId))<br>50.0 MiB (256.0 KiB, 256.0 KiB, 256.0 KiB (stage 9.0: task 811))<br>number of output rows: 2"];
}
9 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 3<br>shuffle write time total (min, med, max (stageId: taskId))<br>9 ms (4 ms, 4 ms, 4 ms (stage 8.0: task 809))<br>records read: 3<br>local bytes read total (min, med, max (stageId: taskId))<br>168.0 B (0.0 B, 0.0 B, 88.0 B (stage 9.0: task 885))<br>fetch wait time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 9.0: task 860))<br>remote bytes read total (min, med, max (stageId: taskId))<br>80.0 B (0.0 B, 0.0 B, 80.0 B (stage 9.0: task 860))<br>local blocks read: 2<br>remote blocks read: 1<br>data size total (min, med, max (stageId: taskId))<br>128.0 B (40.0 B, 88.0 B, 88.0 B (stage 8.0: task 808))<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>248.0 B (80.0 B, 168.0 B, 168.0 B (stage 8.0: task 808))"];
subgraph cluster10 {
isCluster="true";
label="WholeStageCodegen (3)\n \nduration: total (min, med, max (stageId: taskId))\n18 ms (9 ms, 9 ms, 9 ms (stage 8.0: task 808))";
11 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>2 ms (1 ms, 1 ms, 1 ms (stage 8.0: task 808))<br>peak memory total (min, med, max (stageId: taskId))<br>512.0 KiB (256.0 KiB, 256.0 KiB, 256.0 KiB (stage 8.0: task 808))<br>number of output rows: 3"];
12 [labelType="html" label="<br><b>Project</b><br><br>"];
}
13 [labelType="html" label="<br><b>EventTimeWatermark</b><br><br>"];
subgraph cluster14 {
isCluster="true";
label="WholeStageCodegen (2)\n \nduration: total (min, med, max (stageId: taskId))\n10 ms (5 ms, 5 ms, 5 ms (stage 8.0: task 808))";
15 [labelType="html" label="<br><b>Project</b><br><br>"];
16 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 11"];
}
17 [labelType="html" label="<b>Generate</b><br><br>number of output rows: 11"];
subgraph cluster18 {
isCluster="true";
label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n14 ms (7 ms, 7 ms, 7 ms (stage 8.0: task 808))";
19 [labelType="html" label="<br><b>Project</b><br><br>"];
}
20 [labelType="html" label="<b>MicroBatchScan</b><br><br>number of output rows: 11"];
2->0;
3->2;
5->3;
6->5;
8->6;
9->8;
11->9;
12->11;
13->12;
15->13;
16->15;
17->16;
19->17;
20->19;
}
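The block above is the Graphviz DOT source that the Spark UI's SQL tab uses to draw the query DAG: each numbered node is a physical operator with its accumulated metrics, and each `cluster` subgraph groups the operators fused into one WholeStageCodegen stage. It can be rendered outside the UI; a minimal sketch, assuming the DOT source has been saved to a hypothetical `plan.dot` and that the `graphviz` Python package plus the system Graphviz binaries are installed:

```python
# A minimal sketch: render the Spark UI's DOT source with the `graphviz`
# Python bindings (assumes `pip install graphviz` and a system Graphviz
# install; `plan.dot` is a hypothetical file holding the digraph above).
from graphviz import Source

with open("plan.dot") as f:
    dag = Source(f.read())

dag.render("plan", format="svg", cleanup=True)  # writes plan.svg
```

One caveat: the `labelType="html"` attribute is understood by the dagre-d3 renderer the UI runs in the browser, not by Graphviz proper, so a plain `dot` render will show the raw `<br>`/`<b>` markup inside the node labels. The listing that follows is the same plan node by node, in DAG order, with each operator's string description followed by the WholeStageCodegen stage that contains it.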
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@7d35ee79
HashAggregate(keys=[hostname#12], functions=[count(1)])
WholeStageCodegen (6)
StateStoreSave [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 4, numPartitions = 200], Complete, 1733343816035, 2
HashAggregate(keys=[hostname#12], functions=[merge_count(1)])
WholeStageCodegen (5)
StateStoreRestore [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 4, numPartitions = 200], 2
HashAggregate(keys=[hostname#12], functions=[merge_count(1)])
WholeStageCodegen (4)
Exchange hashpartitioning(hostname#12, 200), true, [id=#429]
HashAggregate(keys=[hostname#12], functions=[partial_count(1)])
Project [split(logs#5, ,, -1)[1] AS hostname#12]
WholeStageCodegen (3)
EventTimeWatermark timestamp#1: timestamp, 3 seconds
Project [logs#5, timestamp#1]
Filter ((split(logs#5, ,, -1)[3] = GET) AND (split(logs#5, ,, -1)[5] = 200))
WholeStageCodegen (2)
Generate explode(split(value#0, , -1)), [timestamp#1], false, [logs#5]
Project [value#0, timestamp#1]
WholeStageCodegen (1)
MicroBatchScan[value#0, timestamp#1] class org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1
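Reading the plan from the bottom up: a socket text source is exploded into individual log lines, a 3-second watermark is applied, comma-separated fields are extracted, non-`GET`/non-`200` records are filtered out, and a running count per `hostname` is maintained across micro-batches in Complete mode (hence the StateStoreRestore/StateStoreSave pair around the merge-side HashAggregate). A minimal PySpark sketch of a query that could produce this shape follows; the host, port, watermark, filter, and column names come from the plans above, while the record delimiter, the chained `withColumn` parsing, and the console sink are assumptions:

```python
# A minimal sketch, assuming PySpark >= 3.0. Host and port are taken from
# the source description in the plan; the record delimiter and the sink
# are assumptions (the plan only shows a MicroBatchWrite).
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, explode, split

spark = SparkSession.builder.appName("log-counts").getOrCreate()

raw = (spark.readStream
       .format("socket")
       .option("host", "stream-emulator.data-science-tools.svc.cluster.local")
       .option("port", 5551)
       .option("includeTimestamp", True)   # yields the timestamp#1 column
       .load())

# Generate explode(split(value ...)): one log line per row (delimiter assumed)
logs = raw.select(explode(split(col("value"), "\n")).alias("logs"),
                  col("timestamp"))

parsed = (logs
          .withWatermark("timestamp", "3 seconds")  # EventTimeWatermark, 3000 ms
          .withColumn("idx",          split(col("logs"), ",")[0])
          .withColumn("hostname",     split(col("logs"), ",")[1])
          .withColumn("time",         split(col("logs"), ",")[2])
          .withColumn("method",       split(col("logs"), ",")[3])
          .withColumn("url",          split(col("logs"), ",")[4])
          .withColumn("responsecode", split(col("logs"), ",")[5])
          .withColumn("bytes",        split(col("logs"), ",")[6]))

counts = (parsed
          .filter((col("method") == "GET") & (col("responsecode") == "200"))
          .groupBy("hostname")
          .count()
          .withColumnRenamed("count", "correct_count"))

query = (counts.writeStream
         .outputMode("complete")  # matches the Complete mode in StateStoreSave
         .format("console")       # sink is an assumption
         .start())
```

Note that the three-level aggregation visible in the plan (partial_count before the Exchange, merge_count around the state store, the final count on top) is how Spark splits a streaming groupBy-count across the shuffle and the state store; none of it appears in user code.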
== Parsed Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@7d35ee79
+- Project [hostname#12, count#67L AS correct_count#70L]
+- Aggregate [hostname#12], [hostname#12, count(1) AS count#67L]
+- Filter ((method#23 = GET) AND (responsecode#38 = 200))
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, responsecode#38, split(logs#5, ,, -1)[6] AS bytes#47]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, split(logs#5, ,, -1)[5] AS responsecode#38]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, split(logs#5, ,, -1)[4] AS url#30]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, split(logs#5, ,, -1)[3] AS method#23]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, split(logs#5, ,, -1)[2] AS time#17]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, split(logs#5, ,, -1)[1] AS hostname#12]
+- Project [logs#5, timestamp#1-T3000ms, split(logs#5, ,, -1)[0] AS idx#8]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@484fdb6, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 36, 47
== Analyzed Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@7d35ee79
+- Project [hostname#12, count#67L AS correct_count#70L]
+- Aggregate [hostname#12], [hostname#12, count(1) AS count#67L]
+- Filter ((method#23 = GET) AND (responsecode#38 = 200))
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, responsecode#38, split(logs#5, ,, -1)[6] AS bytes#47]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, url#30, split(logs#5, ,, -1)[5] AS responsecode#38]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, method#23, split(logs#5, ,, -1)[4] AS url#30]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, time#17, split(logs#5, ,, -1)[3] AS method#23]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, hostname#12, split(logs#5, ,, -1)[2] AS time#17]
+- Project [logs#5, timestamp#1-T3000ms, idx#8, split(logs#5, ,, -1)[1] AS hostname#12]
+- Project [logs#5, timestamp#1-T3000ms, split(logs#5, ,, -1)[0] AS idx#8]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@484fdb6, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 36, 47
== Optimized Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@7d35ee79
+- Aggregate [hostname#12], [hostname#12, count(1) AS correct_count#70L]
+- Project [split(logs#5, ,, -1)[1] AS hostname#12]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- Project [logs#5, timestamp#1]
+- Filter ((split(logs#5, ,, -1)[3] = GET) AND (split(logs#5, ,, -1)[5] = 200))
+- Generate explode(split(value#0, , -1)), [0], false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@484fdb6, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 36, 47
== Physical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@7d35ee79
+- *(6) HashAggregate(keys=[hostname#12], functions=[count(1)], output=[hostname#12, correct_count#70L])
+- StateStoreSave [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 4, numPartitions = 200], Complete, 1733343816035, 2
+- *(5) HashAggregate(keys=[hostname#12], functions=[merge_count(1)], output=[hostname#12, count#76L])
+- StateStoreRestore [hostname#12], state info [ checkpoint = file:/tmp/temporary-7dde7be9-c7f2-4a1a-96c6-fb19f120126d/state, runId = 10bd72d4-04b3-48e3-97a6-62b1f13a8094, opId = 0, ver = 4, numPartitions = 200], 2
+- *(4) HashAggregate(keys=[hostname#12], functions=[merge_count(1)], output=[hostname#12, count#76L])
+- Exchange hashpartitioning(hostname#12, 200), true, [id=#429]
+- *(3) HashAggregate(keys=[hostname#12], functions=[partial_count(1)], output=[hostname#12, count#76L])
+- *(3) Project [split(logs#5, ,, -1)[1] AS hostname#12]
+- EventTimeWatermark timestamp#1: timestamp, 3 seconds
+- *(2) Project [logs#5, timestamp#1]
+- *(2) Filter ((split(logs#5, ,, -1)[3] = GET) AND (split(logs#5, ,, -1)[5] = 200))
+- Generate explode(split(value#0, , -1)), [timestamp#1], false, [logs#5]
+- *(1) Project [value#0, timestamp#1]
+- MicroBatchScan[value#0, timestamp#1] class org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1
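The four `== ... ==` sections above are standard extended-explain output. For a streaming query they can be reproduced from the running query handle; a minimal sketch, assuming `query` is the `StreamingQuery` handle from the sketch earlier:

```python
# Prints the parsed, analyzed, optimized, and physical plans of the most
# recently executed micro-batch (assumes `query` is a running StreamingQuery).
query.explain(extended=True)
```

Notice how the optimized plan collapses the seven chained Projects of the analyzed plan into a single `split(logs#5, ,, -1)[1] AS hostname#12` expression and pushes the `GET`/`200` Filter down toward the Generate, so only the surviving rows reach the watermark and the aggregation.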