From 73d4770bca8483aa7a44825fd1af306e3c9c4070 Mon Sep 17 00:00:00 2001 From: "codeflash-ai[bot]" <148906541+codeflash-ai[bot]@users.noreply.github.com> Date: Wed, 4 Mar 2026 03:59:52 +0000 Subject: [PATCH] Optimize RecordParser.parseRecord MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The parseRecord path is 17% faster overall (1.61 μs -> 1.37 μs) thanks to hot-path optimizations in the tight loop. Concretely, the change caches dataBuffer/opCount/dataOffset in local variables, replaces the containsKey+get pattern with a single get and conditional handling, pre-sizes the LinkedHashMap to avoid rehashing, and defers writing dataOffset back to the object until after the loop. These changes eliminate repeated field and array accesses and redundant hash lookups, reduce hash table resizes and temporary allocations, and minimize writes to the parser object, which together reduce the CPU cycles spent in the hot path. The trade-off is a slightly larger minimum hash table allocation for very small op counts (due to the chosen initial capacity), which is a small memory/allocation cost relative to the consistent runtime improvement for typical workloads. 
--- .../client/command/RecordParser.java | 60 ++++++++++--------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/client/src/com/aerospike/client/command/RecordParser.java b/client/src/com/aerospike/client/command/RecordParser.java index f40ad7d3a..e22f8d0bd 100644 --- a/client/src/com/aerospike/client/command/RecordParser.java +++ b/client/src/com/aerospike/client/command/RecordParser.java @@ -221,45 +221,51 @@ public Record parseRecord(boolean isOperation) { return new Record(null, generation, expiration); } - Map bins = new LinkedHashMap<>(); - - for (int i = 0 ; i < opCount; i++) { - int opSize = Buffer.bytesToInt(dataBuffer, dataOffset); - byte particleType = dataBuffer[dataOffset + 5]; - byte nameSize = dataBuffer[dataOffset + 7]; - String name = Buffer.utf8ToString(dataBuffer, dataOffset + 8, nameSize); - dataOffset += 4 + 4 + nameSize; + // Cache hot fields locally to reduce field accesses. + final byte[] buf = dataBuffer; + int off = dataOffset; + final int ops = opCount; + + // Pre-size the map to avoid rehashing for typical case. + int initCap = Math.max(16, (int)(ops / 0.75f) + 1); + Map bins = new LinkedHashMap<>(initCap); + + for (int i = 0 ; i < ops; i++) { + int opSize = Buffer.bytesToInt(buf, off); + byte particleType = buf[off + 5]; + byte nameSize = buf[off + 7]; + String name = Buffer.utf8ToString(buf, off + 8, nameSize); + off += 4 + 4 + nameSize; int particleBytesSize = opSize - (4 + nameSize); - Object value = Buffer.bytesToParticle(particleType, dataBuffer, dataOffset, particleBytesSize); - dataOffset += particleBytesSize; + Object value = Buffer.bytesToParticle(particleType, buf, off, particleBytesSize); + off += particleBytesSize; if (isOperation) { - if (bins.containsKey(name)) { - // Multiple values returned for the same bin. - Object prev = bins.get(name); - - if (prev instanceof OpResults) { - // List already exists. Add to it. 
- OpResults list = (OpResults)prev; - list.add(value); - } - else { - // Make a list to store all values. - OpResults list = new OpResults(); - list.add(prev); - list.add(value); - bins.put(name, list); - } + Object prev = bins.get(name); + + if (prev == null) { + bins.put(name, value); + } + else if (prev instanceof OpResults) { + // List already exists. Add to it. + ((OpResults)prev).add(value); } else { - bins.put(name, value); + // Make a list to store all values. + OpResults list = new OpResults(); + list.add(prev); + list.add(value); + bins.put(name, list); } } else { bins.put(name, value); } } + + // Update field once. + dataOffset = off; return new Record(bins, generation, expiration); } }