
Merge remote-tracking branch 'origin/jetty-12.0.x' into jetty-12.1.x
lorban committed Jan 13, 2025
2 parents 14d7a68 + 0a23b82 commit 23e1b0a
Showing 7 changed files with 101 additions and 4 deletions.
@@ -464,9 +464,14 @@ private void configure(Map<Integer, Integer> settings, boolean local)
                 if (LOG.isDebugEnabled())
                     LOG.debug("Updating {} max header list size to {} for {}", local ? "decoder" : "encoder", value, this);
                 if (local)
+                {
                     parser.getHpackDecoder().setMaxHeaderListSize(value);
+                }
                 else
-                    generator.getHpackEncoder().setMaxHeaderListSize(value);
+                {
+                    HpackEncoder hpackEncoder = generator.getHpackEncoder();
+                    hpackEncoder.setMaxHeaderListSize(Math.min(value, hpackEncoder.getMaxHeaderListSize()));
+                }
             }
             case SettingsFrame.ENABLE_CONNECT_PROTOCOL ->
             {
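
This hunk changes the encoder side to cap, rather than adopt, the peer's SETTINGS_MAX_HEADER_LIST_SIZE: the effective limit becomes the minimum of the advertised value and the locally configured one, so a remote peer can no longer raise the client's own encoding limit. A minimal sketch of that rule, using illustrative names rather than Jetty's API:

    public final class MaxHeaderListSizeCap
    {
        public static void main(String[] args)
        {
            int localLimit = 8 * 1024;      // locally configured encoder limit (illustrative value)
            int peerAdvertised = 64 * 1024; // peer's SETTINGS_MAX_HEADER_LIST_SIZE

            // The stricter of the two limits wins: the peer can lower the
            // effective limit but never raise it above localLimit.
            int effective = Math.min(peerAdvertised, localLimit);
            System.out.println("effective max header list size: " + effective); // 8192
        }
    }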
@@ -156,7 +156,7 @@ public int getMaxHeaderListSize()
 
     public void setMaxHeaderListSize(int maxHeaderListSize)
     {
-        _maxHeaderListSize = maxHeaderListSize;
+        _maxHeaderListSize = maxHeaderListSize > 0 ? maxHeaderListSize : HpackContext.DEFAULT_MAX_HEADER_LIST_SIZE;
     }
 
     public HpackContext getHpackContext()
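
The setter now treats a non-positive argument as "unset" and falls back to HpackContext.DEFAULT_MAX_HEADER_LIST_SIZE, so a zero or negative value can never become the effective limit. A tiny sketch of the sentinel pattern, with DEFAULT_LIMIT standing in for the Jetty constant (its real value is not shown in this diff):

    final class SentinelDefault
    {
        static final int DEFAULT_LIMIT = 4 * 1024; // stand-in for HpackContext.DEFAULT_MAX_HEADER_LIST_SIZE
        int maxHeaderListSize = DEFAULT_LIMIT;

        void setMaxHeaderListSize(int size)
        {
            // size <= 0 means "use the default", never "no limit".
            maxHeaderListSize = size > 0 ? size : DEFAULT_LIMIT;
        }

        public static void main(String[] args)
        {
            SentinelDefault s = new SentinelDefault();
            s.setMaxHeaderListSize(-1);
            System.out.println(s.maxHeaderListSize); // prints 4096
        }
    }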
@@ -1315,6 +1315,7 @@ public boolean handle(Request request, Response response, Callback callback)
         }, httpConfig);
         connector.getBean(AbstractHTTP2ServerConnectionFactory.class).setMaxFrameSize(17 * 1024);
         http2Client.setMaxFrameSize(18 * 1024);
+        http2Client.setMaxRequestHeadersSize(2 * maxHeadersSize);
 
         // Wait for the SETTINGS frame to be exchanged.
         CountDownLatch settingsLatch = new CountDownLatch(1);
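
This one-line addition raises the client's own request-header limit, presumably so that the server-advertised SETTINGS limit, and not the client-side cap introduced in this commit, is what the surrounding max-frame-size test exercises.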
@@ -447,6 +447,42 @@ public void onGoAway(Session session, GoAwayFrame frame)
         assertTrue(goAwayLatch.await(5, TimeUnit.SECONDS));
     }
 
+    @Test
+    public void testMaxHeaderListSizeCappedByClient() throws Exception
+    {
+        int maxHeadersSize = 2 * 1024;
+        CountDownLatch goAwayLatch = new CountDownLatch(1);
+        start(new ServerSessionListener()
+        {
+            @Override
+            public Map<Integer, Integer> onPreface(Session session)
+            {
+                return Map.of(SettingsFrame.MAX_HEADER_LIST_SIZE, maxHeadersSize);
+            }
+
+            @Override
+            public void onGoAway(Session session, GoAwayFrame frame)
+            {
+                goAwayLatch.countDown();
+            }
+        });
+        http2Client.setMaxRequestHeadersSize(maxHeadersSize / 2);
+
+        Session clientSession = newClientSession(new Session.Listener() {});
+        HttpFields requestHeaders = HttpFields.build()
+            .put("X-Large", "x".repeat(maxHeadersSize - 256)); // 256 bytes to account for the other headers
+        MetaData.Request request = newRequest("GET", requestHeaders);
+        HeadersFrame frame = new HeadersFrame(request, null, true);
+
+        Throwable failure = assertThrows(ExecutionException.class,
+            () -> clientSession.newStream(frame, new Stream.Listener() {}).get(5, TimeUnit.SECONDS))
+            .getCause();
+        // The HPACK context is compromised trying to encode the large header.
+        assertThat(failure, Matchers.instanceOf(HpackException.SessionException.class));
+
+        assertTrue(goAwayLatch.await(5, TimeUnit.SECONDS));
+    }
+
     @Test
     public void testMaxHeaderListSizeExceededByServer() throws Exception
     {
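
The new test also documents the failure mode from the application's point of view: newStream(...) returns a future that fails, and the HPACK error surfaces as the cause of the ExecutionException thrown by get(). A self-contained sketch of that unwrapping pattern, where the future and the exception are stand-ins rather than Jetty objects:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    public final class CauseUnwrap
    {
        public static void main(String[] args) throws InterruptedException
        {
            // Stand-in for a newStream(...) future that fails during HPACK encoding.
            CompletableFuture<Void> stream = new CompletableFuture<>();
            stream.completeExceptionally(new IllegalStateException("HPACK session compromised"));

            try
            {
                stream.get();
            }
            catch (ExecutionException x)
            {
                // get() wraps the failure; the real error is the cause, which
                // in the test above is HpackException.SessionException.
                System.out.println("stream failed: " + x.getCause());
            }
        }
    }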
@@ -23,6 +23,7 @@
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -41,6 +42,7 @@
 import org.eclipse.jetty.util.annotation.ManagedOperation;
 import org.eclipse.jetty.util.component.Dumpable;
 import org.eclipse.jetty.util.component.DumpableCollection;
+import org.eclipse.jetty.util.component.DumpableMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,8 +68,11 @@ public class ArrayByteBufferPool implements ByteBufferPool, Dumpable
     private final long _maxHeapMemory;
     private final long _maxDirectMemory;
     private final IntUnaryOperator _bucketIndexFor;
+    private final IntUnaryOperator _bucketCapacity;
     private final AtomicBoolean _evictor = new AtomicBoolean(false);
     private final AtomicLong _reserved = new AtomicLong();
+    private final ConcurrentMap<Integer, Long> _noBucketDirectAcquires = new ConcurrentHashMap<>();
+    private final ConcurrentMap<Integer, Long> _noBucketIndirectAcquires = new ConcurrentHashMap<>();
     private boolean _statisticsEnabled;
 
     /**
@@ -166,6 +171,7 @@ protected ArrayByteBufferPool(int minCapacity, int factor, int maxCapacity, int
         _maxHeapMemory = maxMemory(maxHeapMemory);
         _maxDirectMemory = maxMemory(maxDirectMemory);
         _bucketIndexFor = bucketIndexFor;
+        _bucketCapacity = bucketCapacity;
     }
 
     private long maxMemory(long maxMemory)
@@ -213,7 +219,10 @@ public RetainableByteBuffer.Mutable acquire(int size, boolean direct)
 
         // No bucket, return non-pooled.
         if (bucket == null)
+        {
+            recordNoBucketAcquire(size, direct);
             return RetainableByteBuffer.wrap(BufferUtil.allocate(size, direct));
+        }
 
         bucket.recordAcquire();
 
@@ -232,6 +241,22 @@ public RetainableByteBuffer.Mutable acquire(int size, boolean direct)
         return buffer;
     }
 
+    private void recordNoBucketAcquire(int size, boolean direct)
+    {
+        if (isStatisticsEnabled())
+        {
+            ConcurrentMap<Integer, Long> map = direct ? _noBucketDirectAcquires : _noBucketIndirectAcquires;
+            int idx = _bucketIndexFor.applyAsInt(size);
+            int key = _bucketCapacity.applyAsInt(idx);
+            map.compute(key, (k, v) ->
+            {
+                if (v == null)
+                    return 1L;
+                return v + 1L;
+            });
+        }
+    }
+
     @Override
     public boolean releaseAndRemove(RetainableByteBuffer buffer)
     {
@@ -437,7 +462,9 @@ public long getAvailableHeapMemory()
     public void clear()
     {
         clearBuckets(_direct);
+        _noBucketDirectAcquires.clear();
         clearBuckets(_indirect);
+        _noBucketIndirectAcquires.clear();
     }
 
     private void clearBuckets(RetainedBucket[] buckets)
@@ -456,7 +483,10 @@ public void dump(Appendable out, String indent) throws IOException
             indent,
             this,
             DumpableCollection.fromArray("direct", _direct),
-            DumpableCollection.fromArray("indirect", _indirect));
+            new DumpableMap("direct non-pooled acquisitions", _noBucketDirectAcquires),
+            DumpableCollection.fromArray("indirect", _indirect),
+            new DumpableMap("indirect non-pooled acquisitions", _noBucketIndirectAcquires)
+        );
     }
 
     @Override
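
recordNoBucketAcquire keeps one counter per bucket capacity in a ConcurrentMap; its compute(...) lambda is behaviorally equivalent to merge(key, 1L, Long::sum). A runnable sketch of the same pattern, approximating the capacity lookup as "round up to the next multiple of factor" (an assumption for illustration, not the exact bucketIndexFor/bucketCapacity contract):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public final class PerCapacityCounters
    {
        public static void main(String[] args)
        {
            ConcurrentMap<Integer, Long> counters = new ConcurrentHashMap<>();
            int factor = 10;
            for (int size = 101; size <= 150; size++)
            {
                // Approximate the bucket capacity this request would have mapped to.
                int key = ((size + factor - 1) / factor) * factor;
                counters.merge(key, 1L, Long::sum); // atomic per-key increment
            }
            // Five keys (110..150) with 10 acquisitions each; map order is unspecified.
            System.out.println(counters);
        }
    }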
@@ -25,6 +25,7 @@
 import org.junit.jupiter.api.Test;
 
 import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.lessThan;
@@ -38,6 +39,30 @@
 
 public class ArrayByteBufferPoolTest
 {
+    @Test
+    public void testDump()
+    {
+        ArrayByteBufferPool pool = new ArrayByteBufferPool(0, 10, 100, Integer.MAX_VALUE, 200, 200);
+        pool.setStatisticsEnabled(true);
+
+        List<RetainableByteBuffer> buffers = new ArrayList<>();
+
+        for (int i = 1; i < 151; i++)
+            buffers.add(pool.acquire(i, true));
+
+        buffers.forEach(RetainableByteBuffer::release);
+
+        String dump = pool.dump();
+        assertThat(dump, containsString("direct non-pooled acquisitions size=5\n"));
+        assertThat(dump, containsString("110: 10\n"));
+        assertThat(dump, containsString("120: 10\n"));
+        assertThat(dump, containsString("130: 10\n"));
+        assertThat(dump, containsString("140: 10\n"));
+        assertThat(dump, containsString("150: 10\n"));
+        pool.clear();
+        assertThat(pool.dump(), containsString("direct non-pooled acquisitions size=0\n"));
+    }
+
     @Test
     public void testMaxMemoryEviction()
     {
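
The expected numbers follow from the pool configuration: with maxCapacity 100 and factor 10, the 50 acquires of sizes 101 through 150 find no bucket, and each size rounds up to the next multiple of 10, so exactly five keys (110 through 150) accumulate 10 non-pooled acquisitions apiece. clear() then resets the statistics, which the final size=0 assertion verifies.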
2 changes: 1 addition & 1 deletion pom.xml
@@ -337,7 +337,7 @@
     <license.maven.plugin.version>4.5</license.maven.plugin.version>
     <localRepoPath>${project.build.directory}/local-repo</localRepoPath>
     <log4j2.version>2.24.2</log4j2.version>
-    <logback.version>1.5.12</logback.version>
+    <logback.version>1.5.13</logback.version>
     <lucene.version>9.9.2</lucene.version>
     <mariadb.docker.version>10.3.6</mariadb.docker.version>
     <mariadb.version>3.5.1</mariadb.version>
