Skip to content

Commit b405ced

Browse files
committed
tbV2: fix write_wrap_count under-counting natural wraps
write_wrap_count was only being incremented on the "panic wrap" path (when the incoming chunk is too big for the remaining tail space and unread chunks must be evicted to make room). The "natural wrap" path where wr_ reaches size_ exactly and silently resets to 0 was not counted. Consequently traces could legitimately show bytes_written much greater than buffer_size with write_wrap_count = 0, making the stat unreliable for ring-buffer-pressure diagnostics. V1 counts both paths; V2 had regressed this. Fix: introduce a local |wrapped| flag in CopyChunkUntrusted, set it in both wrap branches, and increment write_wrap_count once at the end if wrapped is true. Why a flag instead of incrementing in both places directly: when the incoming chunk is exactly the buffer size, the panic-wrap path resets wr_ to 0, the chunk is then written, wr_ advances back to size_, and the natural-wrap branch fires too. With direct increments in both places this single CopyChunkUntrusted call would double-count one logical wrap. The flag collapses both branches into a single increment per call, matching the proto contract: // Num. times the ring buffer wrapped around. Adds a regression test that exercises both wrap paths and asserts the counter increments correctly.
1 parent 31d39b6 commit b405ced

2 files changed

Lines changed: 44 additions & 2 deletions

File tree

src/tracing/service/trace_buffer_v2.cc

Lines changed: 8 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -777,6 +777,7 @@ void TraceBufferV2::CopyChunkUntrusted(
777777
// If there isn't enough room from the given write position: write a padding
778778
// record to clear the end of the buffer, wrap and start at offset 0.
779779
const size_t cached_size_to_end = size_to_end();
780+
bool wrapped = false;
780781
if (PERFETTO_UNLIKELY(tbchunk_outer_size > cached_size_to_end)) {
781782
// If we reached the end of the buffer and we are using discard policy,
782783
// this is where we stop. This buffer will no longer accept data.
@@ -785,7 +786,7 @@ void TraceBufferV2::CopyChunkUntrusted(
785786

786787
DeleteNextChunksFor(cached_size_to_end);
787788
wr_ = 0;
788-
stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
789+
wrapped = true;
789790
PERFETTO_DCHECK(size_to_end() >= tbchunk_outer_size);
790791
}
791792

@@ -880,10 +881,15 @@ void TraceBufferV2::CopyChunkUntrusted(
880881

881882
wr_ += tbchunk_outer_size;
882883
PERFETTO_DCHECK(wr_ <= size_ && wr_ <= used_size_);
883-
wr_ = wr_ >= size_ ? 0 : wr_;
884+
if (wr_ >= size_) {
885+
wr_ = 0;
886+
wrapped = true;
887+
}
884888

885889
stats_.set_chunks_written(stats_.chunks_written() + 1);
886890
stats_.set_bytes_written(stats_.bytes_written() + tbchunk_outer_size);
891+
if (wrapped)
892+
stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
887893

888894
// We purge SequenceStates(s) only here, because other parts of the readback
889895
// code (BeginRead()/ReadNextTracePacket()) need to cache SequenceState*

src/tracing/service/trace_buffer_v2_unittest.cc

Lines changed: 36 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -2092,6 +2092,42 @@ TEST_F(TraceBufferV2Test, Alignment_ExactBufferBoundaryFragmentation) {
20922092
ASSERT_THAT(ReadPacket(), IsEmpty());
20932093
}
20942094

2095+
// Verify that write_wrap_count increments on both wrap paths:
2096+
// - natural wrap: chunk exactly fills to the end of the buffer.
2097+
// - panic wrap: incoming chunk doesn't fit in the remaining tail space.
2098+
// Packet sizes are chosen so the in-buffer outer_size aligns to the desired
2099+
// values (16-byte alignment; ~2 bytes of varint overhead per fragment).
2100+
TEST_F(TraceBufferV2Test, WriteWrapCount_CountsBothWrapPaths) {
2101+
// Buffer is aligned up to 4096 bytes minimum by Initialize().
2102+
ResetBuffer(4096);
2103+
EXPECT_EQ(0u, trace_buffer()->stats().write_wrap_count());
2104+
2105+
// Two 2048-byte chunks fill the buffer exactly. The second one triggers a
2106+
// natural wrap (wr_ reaches size_ and resets to 0).
2107+
CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
2108+
.AddPacket(2030, 'a')
2109+
.CopyIntoTraceBuffer();
2110+
EXPECT_EQ(0u, trace_buffer()->stats().write_wrap_count());
2111+
2112+
CreateChunk(ProducerID(1), WriterID(1), ChunkID(1))
2113+
.AddPacket(2030, 'b')
2114+
.CopyIntoTraceBuffer();
2115+
EXPECT_EQ(1u, trace_buffer()->stats().write_wrap_count());
2116+
2117+
// Third chunk of 3072 bytes at wr_=0 fits without wrap.
2118+
CreateChunk(ProducerID(1), WriterID(1), ChunkID(2))
2119+
.AddPacket(3054, 'c')
2120+
.CopyIntoTraceBuffer();
2121+
EXPECT_EQ(1u, trace_buffer()->stats().write_wrap_count());
2122+
2123+
// Fourth chunk of 2048 bytes at wr_=3072: doesn't fit (size_to_end=1024),
2124+
// triggers panic wrap.
2125+
CreateChunk(ProducerID(1), WriterID(1), ChunkID(3))
2126+
.AddPacket(2030, 'd')
2127+
.CopyIntoTraceBuffer();
2128+
EXPECT_EQ(2u, trace_buffer()->stats().write_wrap_count());
2129+
}
2130+
20952131
// Test out-of-order patch application with fragmentation
20962132
TEST_F(TraceBufferV2Test, Patching_OutOfOrderPatchesWithFragmentation) {
20972133
ResetBuffer(4096);

0 commit comments

Comments
 (0)