digraph G {
  0 [labelType="html" label="<br><b>WriteToDataSourceV2</b><br><br>"];
  subgraph cluster1 {
    isCluster="true";
    label="WholeStageCodegen (6)";
    2 [labelType="html" label="<br><b>HashAggregate</b><br><br>"];
  }
  3 [labelType="html" label="<br><b>StateStoreSave</b><br><br>"];
  subgraph cluster4 {
    isCluster="true";
    label="WholeStageCodegen (5)";
    5 [labelType="html" label="<br><b>HashAggregate</b><br><br>"];
  }
  6 [labelType="html" label="<br><b>StateStoreRestore</b><br><br>"];
  subgraph cluster7 {
    isCluster="true";
    label="WholeStageCodegen (4)";
    8 [labelType="html" label="<br><b>HashAggregate</b><br><br>"];
  }
  9 [labelType="html" label="<br><b>Exchange</b><br><br>"];
  subgraph cluster10 {
    isCluster="true";
    label="WholeStageCodegen (3)";
    11 [labelType="html" label="<br><b>HashAggregate</b><br><br>"];
    12 [labelType="html" label="<br><b>Project</b><br><br>"];
  }
  13 [labelType="html" label="<br><b>EventTimeWatermark</b><br><br>"];
  subgraph cluster14 {
    isCluster="true";
    label="WholeStageCodegen (2)";
    15 [labelType="html" label="<br><b>Project</b><br><br>"];
  }
  16 [labelType="html" label="<br><b>Generate</b><br><br>"];
  subgraph cluster17 {
    isCluster="true";
    label="WholeStageCodegen (1)";
    18 [labelType="html" label="<br><b>Project</b><br><br>"];
  }
  19 [labelType="html" label="<br><b>MicroBatchScan</b><br><br>"];
  2->0;
  3->2;
  5->3;
  6->5;
  8->6;
  9->8;
  11->9;
  12->11;
  13->12;
  15->13;
  16->15;
  18->16;
  19->18;
}
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@236377c2
HashAggregate(keys=[hostname#12, responsecode#38], functions=[count(1)])
WholeStageCodegen (6)
StateStoreSave [hostname#12, responsecode#38], state info [ checkpoint = file:/tmp/temporary-1de1ee23-414b-4a88-8dc3-b55201323fb3/state, runId = 05d0ae24-6241-4634-a3bd-732191bb756e, opId = 0, ver = 4, numPartitions = 200], Complete, 1742479452540, 2
HashAggregate(keys=[hostname#12, responsecode#38], functions=[merge_count(1)])
WholeStageCodegen (5)
StateStoreRestore [hostname#12, responsecode#38], state info [ checkpoint = file:/tmp/temporary-1de1ee23-414b-4a88-8dc3-b55201323fb3/state, runId = 05d0ae24-6241-4634-a3bd-732191bb756e, opId = 0, ver = 4, numPartitions = 200], 2
HashAggregate(keys=[hostname#12, responsecode#38], functions=[merge_count(1)])
WholeStageCodegen (4)
Exchange hashpartitioning(hostname#12, responsecode#38, 200), true, [id=#404]
HashAggregate(keys=[hostname#12, responsecode#38], functions=[partial_count(1)])
Project [split(logs#5, ,, -1)[1] AS hostname#12, split(logs#5, ,, -1)[5] AS responsecode#38]
WholeStageCodegen (3)
EventTimeWatermark timestamp#1: timestamp, 5 seconds
Project [logs#5, timestamp#1]
WholeStageCodegen (2)
Generate explode(split(value#0, , -1)), [timestamp#1], false, [logs#5]
Project [value#0, timestamp#1]
WholeStageCodegen (1)
MicroBatchScan[value#0, timestamp#1] class org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1
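The block below is the extended plan description for the same micro-batch: the parsed, analyzed, and optimized logical plans followed by the physical plan, as shown under Details on the SQL tab of the Spark UI. As a minimal sketch, assuming `query` is the StreamingQuery handle returned by writeStream.start() (the name is illustrative), equivalent output can be printed for a running query:

// `query` is a hypothetical handle to the running StreamingQuery.
// With extended explain, Spark prints the plans of the most recently executed micro-batch.
query.explain(true)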
== Parsed Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@236377c2
+- Aggregate [hostname#12, responsecode#38], [hostname#12, responsecode#38, count(1) AS count#67L]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, method#23, resource#30, responsecode#38, split(logs#5, ,, -1)[6] AS bytes#47]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, method#23, resource#30, split(logs#5, ,, -1)[5] AS responsecode#38]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, method#23, split(logs#5, ,, -1)[4] AS resource#30]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, split(logs#5, ,, -1)[3] AS method#23]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, split(logs#5, ,, -1)[2] AS time#17]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, split(logs#5, ,, -1)[1] AS hostname#12]
+- Project [logs#5, timestamp#1-T5000ms, split(logs#5, ,, -1)[0] AS idx#8]
+- EventTimeWatermark timestamp#1: timestamp, 5 seconds
+- Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@3bb35f79, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 43, 56
== Analyzed Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@236377c2
+- Aggregate [hostname#12, responsecode#38], [hostname#12, responsecode#38, count(1) AS count#67L]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, method#23, resource#30, responsecode#38, split(logs#5, ,, -1)[6] AS bytes#47]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, method#23, resource#30, split(logs#5, ,, -1)[5] AS responsecode#38]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, method#23, split(logs#5, ,, -1)[4] AS resource#30]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, time#17, split(logs#5, ,, -1)[3] AS method#23]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, hostname#12, split(logs#5, ,, -1)[2] AS time#17]
+- Project [logs#5, timestamp#1-T5000ms, idx#8, split(logs#5, ,, -1)[1] AS hostname#12]
+- Project [logs#5, timestamp#1-T5000ms, split(logs#5, ,, -1)[0] AS idx#8]
+- EventTimeWatermark timestamp#1: timestamp, 5 seconds
+- Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@3bb35f79, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 43, 56
== Optimized Logical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@236377c2
+- Aggregate [hostname#12, responsecode#38], [hostname#12, responsecode#38, count(1) AS count#67L]
+- Project [split(logs#5, ,, -1)[1] AS hostname#12, split(logs#5, ,, -1)[5] AS responsecode#38]
+- EventTimeWatermark timestamp#1: timestamp, 5 seconds
+- Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), [0], false, [logs#5]
+- StreamingDataSourceV2Relation [value#0, timestamp#1], org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1@3bb35f79, TextSocketV2[host: stream-emulator.data-science-tools.svc.cluster.local, port: 5551], 43, 56
== Physical Plan ==
WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@236377c2
+- *(6) HashAggregate(keys=[hostname#12, responsecode#38], functions=[count(1)], output=[hostname#12, responsecode#38, count#67L])
+- StateStoreSave [hostname#12, responsecode#38], state info [ checkpoint = file:/tmp/temporary-1de1ee23-414b-4a88-8dc3-b55201323fb3/state, runId = 05d0ae24-6241-4634-a3bd-732191bb756e, opId = 0, ver = 4, numPartitions = 200], Complete, 1742479452540, 2
+- *(5) HashAggregate(keys=[hostname#12, responsecode#38], functions=[merge_count(1)], output=[hostname#12, responsecode#38, count#75L])
+- StateStoreRestore [hostname#12, responsecode#38], state info [ checkpoint = file:/tmp/temporary-1de1ee23-414b-4a88-8dc3-b55201323fb3/state, runId = 05d0ae24-6241-4634-a3bd-732191bb756e, opId = 0, ver = 4, numPartitions = 200], 2
+- *(4) HashAggregate(keys=[hostname#12, responsecode#38], functions=[merge_count(1)], output=[hostname#12, responsecode#38, count#75L])
+- Exchange hashpartitioning(hostname#12, responsecode#38, 200), true, [id=#404]
+- *(3) HashAggregate(keys=[hostname#12, responsecode#38], functions=[partial_count(1)], output=[hostname#12, responsecode#38, count#75L])
+- *(3) Project [split(logs#5, ,, -1)[1] AS hostname#12, split(logs#5, ,, -1)[5] AS responsecode#38]
+- EventTimeWatermark timestamp#1: timestamp, 5 seconds
+- *(2) Project [logs#5, timestamp#1]
+- Generate explode(split(value#0, , -1)), [timestamp#1], false, [logs#5]
+- *(1) Project [value#0, timestamp#1]
+- MicroBatchScan[value#0, timestamp#1] class org.apache.spark.sql.execution.streaming.sources.TextSocketTable$$anon$1
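For context, here is a minimal sketch of a Structured Streaming job that would produce a plan of this shape. The socket host and port, the 5-second watermark, the comma-split of hostname and responsecode, the grouped count, and the Complete output mode are read off the plans above; the application and object names, the newline delimiter fed to explode, and the console sink are assumptions (the plan only shows a generic MicroBatchWrite). The original job apparently also derived idx, time, method, resource, and bytes columns, which the optimizer prunes away because only hostname and responsecode feed the aggregate.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{explode, split}

object LogCountsByHostAndStatus {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("LogCounts").getOrCreate()  // app name assumed
    import spark.implicits._

    // Socket source with an event-time column; includeTimestamp adds `timestamp`
    // (MicroBatchScan[value#0, timestamp#1] over TextSocketV2 in the plan).
    val lines = spark.readStream
      .format("socket")
      .option("host", "stream-emulator.data-science-tools.svc.cluster.local")
      .option("port", 5551)
      .option("includeTimestamp", true)
      .load()

    // One socket payload can carry several log lines: explode them (Generate),
    // apply the 5-second watermark (EventTimeWatermark), then pull the
    // comma-separated fields out of each line (the Project chain).
    // The "\n" delimiter is an assumption; the plan does not show it.
    val logs = lines
      .select(explode(split($"value", "\n")).as("logs"), $"timestamp")
      .withWatermark("timestamp", "5 seconds")
      .withColumn("hostname", split($"logs", ",").getItem(1))
      .withColumn("responsecode", split($"logs", ",").getItem(5))

    // The grouped count becomes the partial/merge/final HashAggregate chain around
    // StateStoreRestore/StateStoreSave and the hash-partitioned Exchange.
    val counts = logs.groupBy($"hostname", $"responsecode").count()

    // Complete output mode matches the "Complete" flag on StateStoreSave;
    // the console sink stands in for whatever sink backs MicroBatchWrite.
    val query = counts.writeStream
      .outputMode("complete")
      .format("console")
      .start()

    query.awaitTermination()
  }
}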