@@ -37,7 +37,7 @@
<parent>
<groupId>org.jboss.xnio</groupId>
<artifactId>xnio-all</artifactId>
<version>3.7.6.Final</version>
<version>3.7.7.Final</version>
</parent>
<dependencies>
......
jboss-xnio (3.7.7-1) unstable; urgency=medium

  * New upstream version 3.7.7.

 -- Markus Koschany <apo@debian.org>  Sat, 23 Nov 2019 23:13:41 +0100

jboss-xnio (3.7.6-1) unstable; urgency=medium

  * New upstream version 3.7.6.
......
@@ -31,7 +31,7 @@
<parent>
<groupId>org.jboss.xnio</groupId>
<artifactId>xnio-all</artifactId>
<version>3.7.6.Final</version>
<version>3.7.7.Final</version>
</parent>
<properties>
......
@@ -47,6 +47,7 @@ interface Log extends BasicLogger {
Log socketLog = Logger.getMessageLogger(Log.class, "org.xnio.nio.socket");
Log selectorLog = Logger.getMessageLogger(Log.class, "org.xnio.nio.selector");
Log tcpServerLog = Logger.getMessageLogger(Log.class, "org.xnio.nio.tcp.server");
Log tcpServerConnectionLimitLog = Logger.getMessageLogger(Log.class, "org.xnio.nio.tcp.server.connection-limit");
Log udpServerChannelLog = Logger.getMessageLogger(Log.class, "org.xnio.nio.udp.server.channel");
// Greeting
......
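The new org.xnio.nio.tcp.server.connection-limit category allows the connection-limit messages to be filtered separately from the general org.xnio.nio.tcp.server logger. As a minimal sketch (assuming jboss-logging delegates to java.util.logging, where jboss-logging DEBUG maps to JUL FINE), the category could be raised to DEBUG like this:

import java.util.logging.ConsoleHandler;
import java.util.logging.Level;
import java.util.logging.Logger;

public class EnableConnectionLimitDebug {
    public static void main(String[] args) {
        // Raise the new connection-limit category to FINE (jboss-logging DEBUG).
        Logger category = Logger.getLogger("org.xnio.nio.tcp.server.connection-limit");
        category.setLevel(Level.FINE);

        // At least one handler must also pass FINE records for them to appear.
        ConsoleHandler handler = new ConsoleHandler();
        handler.setLevel(Level.FINE);
        category.addHandler(handler);
    }
}

With a different backend (log4j2, logback, or the WildFly log manager), the same category name would instead be configured in that backend's own configuration.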
@@ -26,13 +26,19 @@ import org.xnio.ChannelListeners;
import static java.lang.Math.min;
import static java.lang.Math.max;
import static java.lang.Thread.currentThread;
import org.jboss.logging.Logger;
import static org.xnio.IoUtils.safeClose;
import static org.xnio.nio.Log.tcpServerConnectionLimitLog;
import static org.xnio.nio.Log.tcpServerLog;
/**
* @author <a href="mailto:david.lloyd@redhat.com">David M. Lloyd</a>
*/
final class NioTcpServerHandle extends NioHandle implements ChannelClosed {
private static final String FQCN = NioTcpServerHandle.class.getName();
private final Runnable freeTask;
private final NioTcpServer server;
private int count;
@@ -109,6 +115,8 @@ final class NioTcpServerHandle extends NioHandle implements ChannelClosed {
void freeConnection() {
assert currentThread() == getWorkerThread();
if (count-- <= low && tokenCount != 0 && stopped) {
tcpServerConnectionLimitLog.logf(FQCN, Logger.Level.DEBUG, null,
"Connection freed, resuming accept connections");
stopped = false;
if (server.resumed) {
// end backoff optimistically
@@ -191,12 +199,17 @@ final class NioTcpServerHandle extends NioHandle implements ChannelClosed {
boolean getConnection() {
assert currentThread() == getWorkerThread();
if (stopped || backOff) {
tcpServerConnectionLimitLog.logf(FQCN, Logger.Level.DEBUG, null, "Refusing accepting request (temporarily stopped: %s, backed off: %s)", stopped, backOff);
return false;
}
if (tokenCount != -1 && --tokenCount == 0) {
setThreadNewCount(getWorkerThread().getNextThread(), server.getTokenConnectionCount());
}
if (++count >= high || tokenCount == 0) {
if (tcpServerLog.isDebugEnabled() && count >= high)
tcpServerConnectionLimitLog.logf(FQCN, Logger.Level.DEBUG, null,
"Total open connections reach high water limit (%s) by this new accepting request. Temporarily stopping accept connections",
high);
stopped = true;
super.suspend(SelectionKey.OP_ACCEPT);
}
......
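The debug messages added above instrument accept throttling that NioTcpServerHandle already performs: open connections are counted, OP_ACCEPT is suspended once the count reaches the high-water mark, and accepting resumes once enough connections have been freed. The following is a simplified, hypothetical illustration of that pattern only (not XNIO's actual handle, and it omits the per-thread token accounting):

final class WatermarkGate {
    private final int high; // stop accepting at or above this many open connections
    private final int low;  // resume accepting once back at or below this count
    private int count;
    private boolean stopped;

    WatermarkGate(int high, int low) {
        this.high = high;
        this.low = low;
    }

    /** Called for each accept attempt; returns false while accepts are throttled. */
    synchronized boolean tryAcceptConnection() {
        if (stopped) {
            return false; // corresponds to the "Refusing accepting request" debug line
        }
        if (++count >= high) {
            stopped = true; // corresponds to "Temporarily stopping accept connections"
        }
        return true;
    }

    /** Called when an accepted connection is closed. */
    synchronized void freeConnection() {
        if (--count <= low && stopped) {
            stopped = false; // corresponds to "resuming accept connections"
        }
    }
}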
@@ -20,6 +20,7 @@ package org.xnio.nio;
import static org.xnio.IoUtils.safeClose;
import static org.xnio.nio.Log.log;
import static org.xnio.nio.Log.tcpServerConnectionLimitLog;
import static org.xnio.nio.Log.tcpServerLog;
import java.io.IOException;
@@ -114,6 +115,7 @@ final class QueuedNioTcpServer extends AbstractNioChannel<QueuedNioTcpServer> im
* The current number of open connections, can only be accessed by the accept thread
*/
private int openConnections;
private boolean limitwarn = true;
private volatile boolean suspendedDueToWatermark;
private volatile boolean suspended;
@@ -359,7 +361,7 @@ final class QueuedNioTcpServer extends AbstractNioChannel<QueuedNioTcpServer> im
if(openConnections >= getHighWater(connectionStatus)) {
synchronized (QueuedNioTcpServer.this) {
suspendedDueToWatermark = true;
tcpServerLog.logf(FQCN, Logger.Level.DEBUG, null, "Total open connections reach high water limit (%s) after updating water mark", getHighWater(connectionStatus));
tcpServerConnectionLimitLog.logf(FQCN, Logger.Level.DEBUG, null, "Total open connections reach high water limit (%s) after updating water mark", getHighWater(connectionStatus));
}
} else if(suspendedDueToWatermark && openConnections <= getLowWater(connectionStatus)) {
suspendedDueToWatermark = false;
@@ -478,7 +480,7 @@ final class QueuedNioTcpServer extends AbstractNioChannel<QueuedNioTcpServer> im
try {
accepted = channel.accept();
if(suspendedDueToWatermark) {
tcpServerLog.logf(FQCN, Logger.Level.DEBUG, null, "Exceeding connection high water limit (%s). Closing this new accepting request %s", getHighWater(connectionStatus), accepted);
tcpServerConnectionLimitLog.logf(FQCN, Logger.Level.DEBUG, null, "Exceeding connection high water limit (%s). Closing this new accepting request %s", getHighWater(connectionStatus), accepted);
IoUtils.safeClose(accepted);
return;
}
@@ -512,7 +514,12 @@ final class QueuedNioTcpServer extends AbstractNioChannel<QueuedNioTcpServer> im
if(openConnections >= getHighWater(connectionStatus)) {
synchronized (QueuedNioTcpServer.this) {
suspendedDueToWatermark = true;
tcpServerLog.logf(FQCN, Logger.Level.DEBUG, null, "Total open connections reach high water limit (%s) by this new accepting request %s", getHighWater(connectionStatus), accepted);
if (limitwarn) {
tcpServerConnectionLimitLog.logf(FQCN, Logger.Level.WARN, null, "Total open connections reach high water limit (%s) by this new accepting request %s", getHighWater(connectionStatus), accepted);
limitwarn = false;
} else {
tcpServerConnectionLimitLog.logf(FQCN, Logger.Level.DEBUG, null, "Total open connections reach high water limit (%s) by this new accepting request %s", getHighWater(connectionStatus), accepted);
}
}
}
} finally {
......
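QueuedNioTcpServer takes its high and low water marks from the server's connection options, so the new WARN and DEBUG messages fire relative to whatever limits the application configured; the limitwarn flag above makes only the first refusal log at WARN. A hedged usage sketch of setting those limits (XNIO 3.x API; the port and numbers are arbitrary):

import java.io.IOException;
import java.net.InetSocketAddress;
import org.xnio.IoUtils;
import org.xnio.OptionMap;
import org.xnio.Options;
import org.xnio.StreamConnection;
import org.xnio.Xnio;
import org.xnio.XnioWorker;
import org.xnio.channels.AcceptingChannel;

public class WatermarkServer {
    public static void main(String[] args) throws IOException {
        XnioWorker worker = Xnio.getInstance().createWorker(OptionMap.EMPTY);
        OptionMap serverOptions = OptionMap.builder()
                .set(Options.CONNECTION_HIGH_WATER, 1000) // accepts pause at 1000 open connections
                .set(Options.CONNECTION_LOW_WATER, 900)   // accepts resume once back down to 900
                .getMap();
        AcceptingChannel<StreamConnection> server = worker.createStreamConnectionServer(
                new InetSocketAddress(8080),
                accepting -> {
                    try {
                        // Demo only: accept and immediately close each connection.
                        StreamConnection connection = accepting.accept();
                        IoUtils.safeClose(connection);
                    } catch (IOException e) {
                        // ignored in this sketch
                    }
                },
                serverOptions);
        server.resumeAccepts();
    }
}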
@@ -32,7 +32,7 @@
<artifactId>xnio-all</artifactId>
<packaging>pom</packaging>
<name>XNIO Parent POM</name>
<version>3.7.6.Final</version>
<version>3.7.7.Final</version>
<description>The aggregator POM of the XNIO project</description>
<licenses>
......