Compare commits
8 Commits
0.1.4
...
0.2.0-alph
Author | SHA1 | Date | |
---|---|---|---|
adf8a0cf24
|
|||
42eb26a948
|
|||
f048a60540
|
|||
0463038aaa
|
|||
7eca8a270d
|
|||
84d7c977f9
|
|||
317eadce07
|
|||
af79e74b95
|
@@ -5,7 +5,7 @@ WORKDIR /home/luser
|
|||||||
|
|
||||||
FROM base-release AS release
|
FROM base-release AS release
|
||||||
ADD rbcs-cli-envelope-*.jar rbcs.jar
|
ADD rbcs-cli-envelope-*.jar rbcs.jar
|
||||||
ENTRYPOINT ["java", "-XX:+UseZGC", "-XX:+ZGenerational", "-jar", "/home/luser/rbcs.jar", "server"]
|
ENTRYPOINT ["java", "-XX:+UseSerialGC", "-XX:GCTimeRatio=24", "-jar", "/home/luser/rbcs.jar", "server"]
|
||||||
|
|
||||||
FROM base-release AS release-memcache
|
FROM base-release AS release-memcache
|
||||||
ADD --chown=luser:luser rbcs-cli-envelope-*.jar rbcs.jar
|
ADD --chown=luser:luser rbcs-cli-envelope-*.jar rbcs.jar
|
||||||
@@ -13,4 +13,4 @@ RUN mkdir plugins
|
|||||||
WORKDIR /home/luser/plugins
|
WORKDIR /home/luser/plugins
|
||||||
RUN --mount=type=bind,source=.,target=/build/distributions tar -xf /build/distributions/rbcs-server-memcache*.tar
|
RUN --mount=type=bind,source=.,target=/build/distributions tar -xf /build/distributions/rbcs-server-memcache*.tar
|
||||||
WORKDIR /home/luser
|
WORKDIR /home/luser
|
||||||
ENTRYPOINT ["java", "-XX:+UseZGC", "-XX:+ZGenerational", "-jar", "/home/luser/rbcs.jar", "server"]
|
ENTRYPOINT ["java", "-XX:+UseSerialGC", "-XX:GCTimeRatio=24", "-jar", "/home/luser/rbcs.jar", "server"]
|
||||||
|
@@ -2,11 +2,10 @@ org.gradle.configuration-cache=false
|
|||||||
org.gradle.parallel=true
|
org.gradle.parallel=true
|
||||||
org.gradle.caching=true
|
org.gradle.caching=true
|
||||||
|
|
||||||
rbcs.version = 0.1.4
|
rbcs.version = 0.2.0
|
||||||
|
|
||||||
lys.version = 2025.02.05
|
lys.version = 2025.02.08
|
||||||
|
|
||||||
gitea.maven.url = https://gitea.woggioni.net/api/packages/woggioni/maven
|
gitea.maven.url = https://gitea.woggioni.net/api/packages/woggioni/maven
|
||||||
docker.registry.url=gitea.woggioni.net
|
docker.registry.url=gitea.woggioni.net
|
||||||
|
|
||||||
jpms-check.configurationName = runtimeClasspath
|
|
||||||
|
@@ -6,6 +6,7 @@ plugins {
|
|||||||
|
|
||||||
dependencies {
|
dependencies {
|
||||||
api catalog.netty.buffer
|
api catalog.netty.buffer
|
||||||
|
api catalog.netty.handler
|
||||||
}
|
}
|
||||||
|
|
||||||
publishing {
|
publishing {
|
||||||
|
@@ -2,6 +2,9 @@ module net.woggioni.rbcs.api {
|
|||||||
requires static lombok;
|
requires static lombok;
|
||||||
requires java.xml;
|
requires java.xml;
|
||||||
requires io.netty.buffer;
|
requires io.netty.buffer;
|
||||||
|
requires io.netty.handler;
|
||||||
|
requires io.netty.transport;
|
||||||
exports net.woggioni.rbcs.api;
|
exports net.woggioni.rbcs.api;
|
||||||
exports net.woggioni.rbcs.api.exception;
|
exports net.woggioni.rbcs.api.exception;
|
||||||
|
exports net.woggioni.rbcs.api.message;
|
||||||
}
|
}
|
@@ -1,14 +0,0 @@
|
|||||||
package net.woggioni.rbcs.api;
|
|
||||||
|
|
||||||
import io.netty.buffer.ByteBuf;
|
|
||||||
import net.woggioni.rbcs.api.exception.ContentTooLargeException;
|
|
||||||
|
|
||||||
import java.nio.channels.ReadableByteChannel;
|
|
||||||
import java.util.concurrent.CompletableFuture;
|
|
||||||
|
|
||||||
|
|
||||||
public interface Cache extends AutoCloseable {
|
|
||||||
CompletableFuture<ReadableByteChannel> get(String key);
|
|
||||||
|
|
||||||
CompletableFuture<Void> put(String key, ByteBuf content) throws ContentTooLargeException;
|
|
||||||
}
|
|
@@ -0,0 +1,7 @@
|
|||||||
|
package net.woggioni.rbcs.api;
|
||||||
|
|
||||||
|
import io.netty.channel.ChannelHandler;
|
||||||
|
|
||||||
|
public interface CacheHandlerFactory extends AutoCloseable {
|
||||||
|
ChannelHandler newHandler();
|
||||||
|
}
|
@@ -0,0 +1,14 @@
|
|||||||
|
package net.woggioni.rbcs.api;
|
||||||
|
|
||||||
|
import lombok.Getter;
|
||||||
|
import lombok.RequiredArgsConstructor;
|
||||||
|
|
||||||
|
import java.io.Serializable;
|
||||||
|
|
||||||
|
@Getter
|
||||||
|
@RequiredArgsConstructor
|
||||||
|
public class CacheValueMetadata implements Serializable {
|
||||||
|
private final String contentDisposition;
|
||||||
|
private final String mimeType;
|
||||||
|
}
|
||||||
|
|
@@ -135,7 +135,7 @@ public class Configuration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public interface Cache {
|
public interface Cache {
|
||||||
net.woggioni.rbcs.api.Cache materialize();
|
CacheHandlerFactory materialize();
|
||||||
String getNamespaceURI();
|
String getNamespaceURI();
|
||||||
String getTypeName();
|
String getTypeName();
|
||||||
}
|
}
|
||||||
|
@@ -0,0 +1,161 @@
|
|||||||
|
package net.woggioni.rbcs.api.message;
|
||||||
|
|
||||||
|
import io.netty.buffer.ByteBuf;
|
||||||
|
import io.netty.buffer.ByteBufHolder;
|
||||||
|
import lombok.Getter;
|
||||||
|
import lombok.RequiredArgsConstructor;
|
||||||
|
import net.woggioni.rbcs.api.CacheValueMetadata;
|
||||||
|
|
||||||
|
public sealed interface CacheMessage {
|
||||||
|
|
||||||
|
@Getter
|
||||||
|
@RequiredArgsConstructor
|
||||||
|
final class CacheGetRequest implements CacheMessage {
|
||||||
|
private final String key;
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract sealed class CacheGetResponse implements CacheMessage {
|
||||||
|
}
|
||||||
|
|
||||||
|
@Getter
|
||||||
|
@RequiredArgsConstructor
|
||||||
|
final class CacheValueFoundResponse extends CacheGetResponse {
|
||||||
|
private final String key;
|
||||||
|
private final CacheValueMetadata metadata;
|
||||||
|
}
|
||||||
|
|
||||||
|
final class CacheValueNotFoundResponse extends CacheGetResponse {
|
||||||
|
}
|
||||||
|
|
||||||
|
@Getter
|
||||||
|
@RequiredArgsConstructor
|
||||||
|
final class CachePutRequest implements CacheMessage {
|
||||||
|
private final String key;
|
||||||
|
private final CacheValueMetadata metadata;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Getter
|
||||||
|
@RequiredArgsConstructor
|
||||||
|
final class CachePutResponse implements CacheMessage {
|
||||||
|
private final String key;
|
||||||
|
}
|
||||||
|
|
||||||
|
@RequiredArgsConstructor
|
||||||
|
non-sealed class CacheContent implements CacheMessage, ByteBufHolder {
|
||||||
|
protected final ByteBuf chunk;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ByteBuf content() {
|
||||||
|
return chunk;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent copy() {
|
||||||
|
return replace(chunk.copy());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent duplicate() {
|
||||||
|
return new CacheContent(chunk.duplicate());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent retainedDuplicate() {
|
||||||
|
return new CacheContent(chunk.retainedDuplicate());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent replace(ByteBuf content) {
|
||||||
|
return new CacheContent(content);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent retain() {
|
||||||
|
chunk.retain();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent retain(int increment) {
|
||||||
|
chunk.retain(increment);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent touch() {
|
||||||
|
chunk.touch();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public CacheContent touch(Object hint) {
|
||||||
|
chunk.touch(hint);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int refCnt() {
|
||||||
|
return chunk.refCnt();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean release() {
|
||||||
|
return chunk.release();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean release(int decrement) {
|
||||||
|
return chunk.release(decrement);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
final class LastCacheContent extends CacheContent {
|
||||||
|
public LastCacheContent(ByteBuf chunk) {
|
||||||
|
super(chunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent copy() {
|
||||||
|
return replace(chunk.copy());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent duplicate() {
|
||||||
|
return new LastCacheContent(chunk.duplicate());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent retainedDuplicate() {
|
||||||
|
return new LastCacheContent(chunk.retainedDuplicate());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent replace(ByteBuf content) {
|
||||||
|
return new LastCacheContent(chunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent retain() {
|
||||||
|
super.retain();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent retain(int increment) {
|
||||||
|
super.retain(increment);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent touch() {
|
||||||
|
super.touch();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public LastCacheContent touch(Object hint) {
|
||||||
|
super.touch(hint);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -44,7 +44,6 @@ envelopeJar {
|
|||||||
dependencies {
|
dependencies {
|
||||||
implementation catalog.jwo
|
implementation catalog.jwo
|
||||||
implementation catalog.slf4j.api
|
implementation catalog.slf4j.api
|
||||||
implementation catalog.netty.codec.http
|
|
||||||
implementation catalog.picocli
|
implementation catalog.picocli
|
||||||
|
|
||||||
implementation project(':rbcs-client')
|
implementation project(':rbcs-client')
|
||||||
|
@@ -1,5 +1,6 @@
|
|||||||
package net.woggioni.rbcs.cli
|
package net.woggioni.rbcs.cli
|
||||||
|
|
||||||
|
import net.woggioni.jwo.Application
|
||||||
import net.woggioni.rbcs.cli.impl.AbstractVersionProvider
|
import net.woggioni.rbcs.cli.impl.AbstractVersionProvider
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
import net.woggioni.rbcs.cli.impl.commands.BenchmarkCommand
|
import net.woggioni.rbcs.cli.impl.commands.BenchmarkCommand
|
||||||
@@ -11,7 +12,6 @@ import net.woggioni.rbcs.cli.impl.commands.PutCommand
|
|||||||
import net.woggioni.rbcs.cli.impl.commands.ServerCommand
|
import net.woggioni.rbcs.cli.impl.commands.ServerCommand
|
||||||
import net.woggioni.rbcs.common.RbcsUrlStreamHandlerFactory
|
import net.woggioni.rbcs.common.RbcsUrlStreamHandlerFactory
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.contextLogger
|
||||||
import net.woggioni.jwo.Application
|
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import picocli.CommandLine.Model.CommandSpec
|
import picocli.CommandLine.Model.CommandSpec
|
||||||
|
|
||||||
|
@@ -1,15 +1,20 @@
|
|||||||
package net.woggioni.rbcs.cli.impl.commands
|
package net.woggioni.rbcs.cli.impl.commands
|
||||||
|
|
||||||
|
import net.woggioni.jwo.JWO
|
||||||
|
import net.woggioni.jwo.LongMath
|
||||||
|
import net.woggioni.rbcs.api.CacheValueMetadata
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
|
import net.woggioni.rbcs.cli.impl.converters.ByteSizeConverter
|
||||||
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
|
import net.woggioni.rbcs.common.debug
|
||||||
import net.woggioni.rbcs.common.error
|
import net.woggioni.rbcs.common.error
|
||||||
import net.woggioni.rbcs.common.info
|
import net.woggioni.rbcs.common.info
|
||||||
import net.woggioni.jwo.JWO
|
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import java.security.SecureRandom
|
import java.security.SecureRandom
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
import java.time.Instant
|
import java.time.Instant
|
||||||
|
import java.time.temporal.ChronoUnit
|
||||||
import java.util.concurrent.LinkedBlockingQueue
|
import java.util.concurrent.LinkedBlockingQueue
|
||||||
import java.util.concurrent.Semaphore
|
import java.util.concurrent.Semaphore
|
||||||
import java.util.concurrent.atomic.AtomicLong
|
import java.util.concurrent.atomic.AtomicLong
|
||||||
@@ -21,7 +26,9 @@ import kotlin.random.Random
|
|||||||
showDefaultValues = true
|
showDefaultValues = true
|
||||||
)
|
)
|
||||||
class BenchmarkCommand : RbcsCommand() {
|
class BenchmarkCommand : RbcsCommand() {
|
||||||
private val log = contextLogger()
|
companion object{
|
||||||
|
private val log = createLogger<BenchmarkCommand>()
|
||||||
|
}
|
||||||
|
|
||||||
@CommandLine.Spec
|
@CommandLine.Spec
|
||||||
private lateinit var spec: CommandLine.Model.CommandSpec
|
private lateinit var spec: CommandLine.Model.CommandSpec
|
||||||
@@ -36,24 +43,36 @@ class BenchmarkCommand : RbcsCommand() {
|
|||||||
@CommandLine.Option(
|
@CommandLine.Option(
|
||||||
names = ["-s", "--size"],
|
names = ["-s", "--size"],
|
||||||
description = ["Size of a cache value in bytes"],
|
description = ["Size of a cache value in bytes"],
|
||||||
paramLabel = "SIZE"
|
paramLabel = "SIZE",
|
||||||
|
converter = [ByteSizeConverter::class]
|
||||||
)
|
)
|
||||||
private var size = 0x1000
|
private var size = 0x1000
|
||||||
|
|
||||||
|
@CommandLine.Option(
|
||||||
|
names = ["-r", "--random"],
|
||||||
|
description = ["Insert completely random byte values"]
|
||||||
|
)
|
||||||
|
private var randomValues = false
|
||||||
|
|
||||||
override fun run() {
|
override fun run() {
|
||||||
val clientCommand = spec.parent().userObject() as ClientCommand
|
val clientCommand = spec.parent().userObject() as ClientCommand
|
||||||
val profile = clientCommand.profileName.let { profileName ->
|
val profile = clientCommand.profileName.let { profileName ->
|
||||||
clientCommand.configuration.profiles[profileName]
|
clientCommand.configuration.profiles[profileName]
|
||||||
?: throw IllegalArgumentException("Profile $profileName does not exist in configuration")
|
?: throw IllegalArgumentException("Profile $profileName does not exist in configuration")
|
||||||
}
|
}
|
||||||
|
val progressThreshold = LongMath.ceilDiv(numberOfEntries.toLong(), 20)
|
||||||
RemoteBuildCacheClient(profile).use { client ->
|
RemoteBuildCacheClient(profile).use { client ->
|
||||||
|
|
||||||
val entryGenerator = sequence {
|
val entryGenerator = sequence {
|
||||||
val random = Random(SecureRandom.getInstance("NativePRNGNonBlocking").nextLong())
|
val random = Random(SecureRandom.getInstance("NativePRNGNonBlocking").nextLong())
|
||||||
while (true) {
|
while (true) {
|
||||||
val key = JWO.bytesToHex(random.nextBytes(16))
|
val key = JWO.bytesToHex(random.nextBytes(16))
|
||||||
val content = random.nextInt().toByte()
|
val value = if(randomValues) {
|
||||||
val value = ByteArray(size, { _ -> content })
|
random.nextBytes(size)
|
||||||
|
} else {
|
||||||
|
val byteValue = random.nextInt().toByte()
|
||||||
|
ByteArray(size) {_ -> byteValue}
|
||||||
|
}
|
||||||
yield(key to value)
|
yield(key to value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -65,13 +84,13 @@ class BenchmarkCommand : RbcsCommand() {
|
|||||||
val completionCounter = AtomicLong(0)
|
val completionCounter = AtomicLong(0)
|
||||||
val completionQueue = LinkedBlockingQueue<Pair<String, ByteArray>>(numberOfEntries)
|
val completionQueue = LinkedBlockingQueue<Pair<String, ByteArray>>(numberOfEntries)
|
||||||
val start = Instant.now()
|
val start = Instant.now()
|
||||||
val semaphore = Semaphore(profile.maxConnections * 3)
|
val semaphore = Semaphore(profile.maxConnections * 5)
|
||||||
val iterator = entryGenerator.take(numberOfEntries).iterator()
|
val iterator = entryGenerator.take(numberOfEntries).iterator()
|
||||||
while (completionCounter.get() < numberOfEntries) {
|
while (completionCounter.get() < numberOfEntries) {
|
||||||
if (iterator.hasNext()) {
|
if (iterator.hasNext()) {
|
||||||
val entry = iterator.next()
|
val entry = iterator.next()
|
||||||
semaphore.acquire()
|
semaphore.acquire()
|
||||||
val future = client.put(entry.first, entry.second).thenApply { entry }
|
val future = client.put(entry.first, entry.second, CacheValueMetadata(null, null)).thenApply { entry }
|
||||||
future.whenComplete { result, ex ->
|
future.whenComplete { result, ex ->
|
||||||
if (ex != null) {
|
if (ex != null) {
|
||||||
log.error(ex.message, ex)
|
log.error(ex.message, ex)
|
||||||
@@ -79,10 +98,15 @@ class BenchmarkCommand : RbcsCommand() {
|
|||||||
completionQueue.put(result)
|
completionQueue.put(result)
|
||||||
}
|
}
|
||||||
semaphore.release()
|
semaphore.release()
|
||||||
completionCounter.incrementAndGet()
|
val completed = completionCounter.incrementAndGet()
|
||||||
|
if(completed.mod(progressThreshold) == 0L) {
|
||||||
|
log.debug {
|
||||||
|
"Inserted $completed / $numberOfEntries"
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
Thread.sleep(0)
|
Thread.sleep(Duration.of(500, ChronoUnit.MILLIS))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -103,12 +127,13 @@ class BenchmarkCommand : RbcsCommand() {
|
|||||||
}
|
}
|
||||||
if (entries.isNotEmpty()) {
|
if (entries.isNotEmpty()) {
|
||||||
val completionCounter = AtomicLong(0)
|
val completionCounter = AtomicLong(0)
|
||||||
val semaphore = Semaphore(profile.maxConnections * 3)
|
val semaphore = Semaphore(profile.maxConnections * 5)
|
||||||
val start = Instant.now()
|
val start = Instant.now()
|
||||||
val it = entries.iterator()
|
val it = entries.iterator()
|
||||||
while (completionCounter.get() < entries.size) {
|
while (completionCounter.get() < entries.size) {
|
||||||
if (it.hasNext()) {
|
if (it.hasNext()) {
|
||||||
val entry = it.next()
|
val entry = it.next()
|
||||||
|
semaphore.acquire()
|
||||||
val future = client.get(entry.first).thenApply {
|
val future = client.get(entry.first).thenApply {
|
||||||
if (it == null) {
|
if (it == null) {
|
||||||
log.error {
|
log.error {
|
||||||
@@ -121,11 +146,16 @@ class BenchmarkCommand : RbcsCommand() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
future.whenComplete { _, _ ->
|
future.whenComplete { _, _ ->
|
||||||
completionCounter.incrementAndGet()
|
val completed = completionCounter.incrementAndGet()
|
||||||
|
if(completed.mod(progressThreshold) == 0L) {
|
||||||
|
log.debug {
|
||||||
|
"Retrieved $completed / ${entries.size}"
|
||||||
|
}
|
||||||
|
}
|
||||||
semaphore.release()
|
semaphore.release()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
Thread.sleep(0)
|
Thread.sleep(Duration.of(500, ChronoUnit.MILLIS))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
val end = Instant.now()
|
val end = Instant.now()
|
||||||
|
@@ -1,8 +1,8 @@
|
|||||||
package net.woggioni.rbcs.cli.impl.commands
|
package net.woggioni.rbcs.cli.impl.commands
|
||||||
|
|
||||||
|
import net.woggioni.jwo.Application
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
||||||
import net.woggioni.jwo.Application
|
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import java.nio.file.Path
|
import java.nio.file.Path
|
||||||
|
|
||||||
|
@@ -2,7 +2,7 @@ package net.woggioni.rbcs.cli.impl.commands
|
|||||||
|
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import java.nio.file.Files
|
import java.nio.file.Files
|
||||||
import java.nio.file.Path
|
import java.nio.file.Path
|
||||||
@@ -13,7 +13,9 @@ import java.nio.file.Path
|
|||||||
showDefaultValues = true
|
showDefaultValues = true
|
||||||
)
|
)
|
||||||
class GetCommand : RbcsCommand() {
|
class GetCommand : RbcsCommand() {
|
||||||
private val log = contextLogger()
|
companion object{
|
||||||
|
private val log = createLogger<GetCommand>()
|
||||||
|
}
|
||||||
|
|
||||||
@CommandLine.Spec
|
@CommandLine.Spec
|
||||||
private lateinit var spec: CommandLine.Model.CommandSpec
|
private lateinit var spec: CommandLine.Model.CommandSpec
|
||||||
|
@@ -2,7 +2,7 @@ package net.woggioni.rbcs.cli.impl.commands
|
|||||||
|
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import java.security.SecureRandom
|
import java.security.SecureRandom
|
||||||
import kotlin.random.Random
|
import kotlin.random.Random
|
||||||
@@ -13,7 +13,9 @@ import kotlin.random.Random
|
|||||||
showDefaultValues = true
|
showDefaultValues = true
|
||||||
)
|
)
|
||||||
class HealthCheckCommand : RbcsCommand() {
|
class HealthCheckCommand : RbcsCommand() {
|
||||||
private val log = contextLogger()
|
companion object{
|
||||||
|
private val log = createLogger<HealthCheckCommand>()
|
||||||
|
}
|
||||||
|
|
||||||
@CommandLine.Spec
|
@CommandLine.Spec
|
||||||
private lateinit var spec: CommandLine.Model.CommandSpec
|
private lateinit var spec: CommandLine.Model.CommandSpec
|
||||||
@@ -32,11 +34,12 @@ class HealthCheckCommand : RbcsCommand() {
|
|||||||
if(value == null) {
|
if(value == null) {
|
||||||
throw IllegalStateException("Empty response from server")
|
throw IllegalStateException("Empty response from server")
|
||||||
}
|
}
|
||||||
|
val offset = value.size - nonce.size
|
||||||
for(i in 0 until nonce.size) {
|
for(i in 0 until nonce.size) {
|
||||||
for(j in value.size - nonce.size until nonce.size) {
|
val a = nonce[i]
|
||||||
if(nonce[i] != value[j]) {
|
val b = value[offset + i]
|
||||||
throw IllegalStateException("Server nonce does not match")
|
if(a != b) {
|
||||||
}
|
throw IllegalStateException("Server nonce does not match")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}.get()
|
}.get()
|
||||||
|
@@ -1,9 +1,9 @@
|
|||||||
package net.woggioni.rbcs.cli.impl.commands
|
package net.woggioni.rbcs.cli.impl.commands
|
||||||
|
|
||||||
|
import net.woggioni.jwo.UncloseableOutputStream
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
import net.woggioni.rbcs.cli.impl.converters.OutputStreamConverter
|
import net.woggioni.rbcs.cli.impl.converters.OutputStreamConverter
|
||||||
import net.woggioni.rbcs.common.PasswordSecurity.hashPassword
|
import net.woggioni.rbcs.common.PasswordSecurity.hashPassword
|
||||||
import net.woggioni.jwo.UncloseableOutputStream
|
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import java.io.OutputStream
|
import java.io.OutputStream
|
||||||
import java.io.OutputStreamWriter
|
import java.io.OutputStreamWriter
|
||||||
|
@@ -1,11 +1,17 @@
|
|||||||
package net.woggioni.rbcs.cli.impl.commands
|
package net.woggioni.rbcs.cli.impl.commands
|
||||||
|
|
||||||
|
import net.woggioni.jwo.Hash
|
||||||
|
import net.woggioni.jwo.JWO
|
||||||
|
import net.woggioni.jwo.NullOutputStream
|
||||||
|
import net.woggioni.rbcs.api.CacheValueMetadata
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
import net.woggioni.rbcs.cli.impl.converters.InputStreamConverter
|
|
||||||
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
import net.woggioni.rbcs.client.RemoteBuildCacheClient
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import java.io.InputStream
|
import java.io.InputStream
|
||||||
|
import java.nio.file.Files
|
||||||
|
import java.nio.file.Path
|
||||||
|
import java.util.UUID
|
||||||
|
|
||||||
@CommandLine.Command(
|
@CommandLine.Command(
|
||||||
name = "put",
|
name = "put",
|
||||||
@@ -13,25 +19,41 @@ import java.io.InputStream
|
|||||||
showDefaultValues = true
|
showDefaultValues = true
|
||||||
)
|
)
|
||||||
class PutCommand : RbcsCommand() {
|
class PutCommand : RbcsCommand() {
|
||||||
private val log = contextLogger()
|
companion object{
|
||||||
|
private val log = createLogger<PutCommand>()
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
@CommandLine.Spec
|
@CommandLine.Spec
|
||||||
private lateinit var spec: CommandLine.Model.CommandSpec
|
private lateinit var spec: CommandLine.Model.CommandSpec
|
||||||
|
|
||||||
@CommandLine.Option(
|
@CommandLine.Option(
|
||||||
names = ["-k", "--key"],
|
names = ["-k", "--key"],
|
||||||
description = ["The key for the new value"],
|
description = ["The key for the new value, randomly generated if omitted"],
|
||||||
paramLabel = "KEY"
|
paramLabel = "KEY"
|
||||||
)
|
)
|
||||||
private var key : String = ""
|
private var key : String? = null
|
||||||
|
|
||||||
|
@CommandLine.Option(
|
||||||
|
names = ["-i", "--inline"],
|
||||||
|
description = ["File is to be displayed in the browser"],
|
||||||
|
paramLabel = "INLINE",
|
||||||
|
)
|
||||||
|
private var inline : Boolean = false
|
||||||
|
|
||||||
|
@CommandLine.Option(
|
||||||
|
names = ["-t", "--type"],
|
||||||
|
description = ["File mime type"],
|
||||||
|
paramLabel = "MIME_TYPE",
|
||||||
|
)
|
||||||
|
private var mimeType : String? = null
|
||||||
|
|
||||||
@CommandLine.Option(
|
@CommandLine.Option(
|
||||||
names = ["-v", "--value"],
|
names = ["-v", "--value"],
|
||||||
description = ["Path to a file containing the value to be added (defaults to stdin)"],
|
description = ["Path to a file containing the value to be added (defaults to stdin)"],
|
||||||
paramLabel = "VALUE_FILE",
|
paramLabel = "VALUE_FILE",
|
||||||
converter = [InputStreamConverter::class]
|
|
||||||
)
|
)
|
||||||
private var value : InputStream = System.`in`
|
private var value : Path? = null
|
||||||
|
|
||||||
override fun run() {
|
override fun run() {
|
||||||
val clientCommand = spec.parent().userObject() as ClientCommand
|
val clientCommand = spec.parent().userObject() as ClientCommand
|
||||||
@@ -40,9 +62,40 @@ class PutCommand : RbcsCommand() {
|
|||||||
?: throw IllegalArgumentException("Profile $profileName does not exist in configuration")
|
?: throw IllegalArgumentException("Profile $profileName does not exist in configuration")
|
||||||
}
|
}
|
||||||
RemoteBuildCacheClient(profile).use { client ->
|
RemoteBuildCacheClient(profile).use { client ->
|
||||||
value.use {
|
val inputStream : InputStream
|
||||||
client.put(key, it.readAllBytes())
|
val mimeType : String?
|
||||||
|
val contentDisposition : String?
|
||||||
|
val valuePath = value
|
||||||
|
val actualKey : String?
|
||||||
|
if(valuePath != null) {
|
||||||
|
inputStream = Files.newInputStream(valuePath)
|
||||||
|
mimeType = this.mimeType ?: Files.probeContentType(valuePath)
|
||||||
|
contentDisposition = if(inline) {
|
||||||
|
"inline"
|
||||||
|
} else {
|
||||||
|
"attachment; filename=\"${valuePath.fileName}\""
|
||||||
|
}
|
||||||
|
actualKey = key ?: let {
|
||||||
|
val md = Hash.Algorithm.SHA512.newInputStream(Files.newInputStream(valuePath)).use {
|
||||||
|
JWO.copy(it, NullOutputStream())
|
||||||
|
it.messageDigest
|
||||||
|
}
|
||||||
|
UUID.nameUUIDFromBytes(md.digest()).toString()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
inputStream = System.`in`
|
||||||
|
mimeType = this.mimeType
|
||||||
|
contentDisposition = if(inline) {
|
||||||
|
"inline"
|
||||||
|
} else {
|
||||||
|
null
|
||||||
|
}
|
||||||
|
actualKey = key ?: UUID.randomUUID().toString()
|
||||||
|
}
|
||||||
|
inputStream.use {
|
||||||
|
client.put(actualKey, it.readAllBytes(), CacheValueMetadata(contentDisposition, mimeType))
|
||||||
}.get()
|
}.get()
|
||||||
|
println(profile.serverURI.resolve(actualKey))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
@@ -1,19 +1,20 @@
|
|||||||
package net.woggioni.rbcs.cli.impl.commands
|
package net.woggioni.rbcs.cli.impl.commands
|
||||||
|
|
||||||
|
import net.woggioni.jwo.Application
|
||||||
|
import net.woggioni.jwo.JWO
|
||||||
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
import net.woggioni.rbcs.cli.impl.RbcsCommand
|
||||||
import net.woggioni.rbcs.cli.impl.converters.DurationConverter
|
import net.woggioni.rbcs.cli.impl.converters.DurationConverter
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
import net.woggioni.rbcs.common.debug
|
import net.woggioni.rbcs.common.debug
|
||||||
import net.woggioni.rbcs.common.info
|
import net.woggioni.rbcs.common.info
|
||||||
import net.woggioni.rbcs.server.RemoteBuildCacheServer
|
import net.woggioni.rbcs.server.RemoteBuildCacheServer
|
||||||
import net.woggioni.rbcs.server.RemoteBuildCacheServer.Companion.DEFAULT_CONFIGURATION_URL
|
import net.woggioni.rbcs.server.RemoteBuildCacheServer.Companion.DEFAULT_CONFIGURATION_URL
|
||||||
import net.woggioni.jwo.Application
|
|
||||||
import net.woggioni.jwo.JWO
|
|
||||||
import picocli.CommandLine
|
import picocli.CommandLine
|
||||||
import java.io.ByteArrayOutputStream
|
import java.io.ByteArrayOutputStream
|
||||||
import java.nio.file.Files
|
import java.nio.file.Files
|
||||||
import java.nio.file.Path
|
import java.nio.file.Path
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
|
import java.util.concurrent.TimeUnit
|
||||||
|
|
||||||
@CommandLine.Command(
|
@CommandLine.Command(
|
||||||
name = "server",
|
name = "server",
|
||||||
@@ -21,8 +22,9 @@ import java.time.Duration
|
|||||||
showDefaultValues = true
|
showDefaultValues = true
|
||||||
)
|
)
|
||||||
class ServerCommand(app : Application) : RbcsCommand() {
|
class ServerCommand(app : Application) : RbcsCommand() {
|
||||||
|
companion object {
|
||||||
private val log = contextLogger()
|
private val log = createLogger<ServerCommand>()
|
||||||
|
}
|
||||||
|
|
||||||
private fun createDefaultConfigurationFile(configurationFile: Path) {
|
private fun createDefaultConfigurationFile(configurationFile: Path) {
|
||||||
log.info {
|
log.info {
|
||||||
@@ -66,11 +68,20 @@ class ServerCommand(app : Application) : RbcsCommand() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
val server = RemoteBuildCacheServer(configuration)
|
val server = RemoteBuildCacheServer(configuration)
|
||||||
server.run().use { server ->
|
val handle = server.run()
|
||||||
timeout?.let {
|
val shutdownHook = Thread.ofPlatform().unstarted {
|
||||||
Thread.sleep(it)
|
handle.sendShutdownSignal()
|
||||||
server.shutdown()
|
try {
|
||||||
|
handle.get(60, TimeUnit.SECONDS)
|
||||||
|
} catch (ex : Throwable) {
|
||||||
|
log.warn(ex.message, ex)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Runtime.getRuntime().addShutdownHook(shutdownHook)
|
||||||
|
if(timeout != null) {
|
||||||
|
Thread.sleep(timeout)
|
||||||
|
handle.sendShutdownSignal()
|
||||||
|
}
|
||||||
|
handle.get()
|
||||||
}
|
}
|
||||||
}
|
}
|
@@ -0,0 +1,10 @@
|
|||||||
|
package net.woggioni.rbcs.cli.impl.converters
|
||||||
|
|
||||||
|
import picocli.CommandLine
|
||||||
|
|
||||||
|
|
||||||
|
class ByteSizeConverter : CommandLine.ITypeConverter<Int> {
|
||||||
|
override fun convert(value: String): Int {
|
||||||
|
return Integer.decode(value)
|
||||||
|
}
|
||||||
|
}
|
@@ -6,9 +6,11 @@ plugins {
|
|||||||
dependencies {
|
dependencies {
|
||||||
implementation project(':rbcs-api')
|
implementation project(':rbcs-api')
|
||||||
implementation project(':rbcs-common')
|
implementation project(':rbcs-common')
|
||||||
implementation catalog.picocli
|
|
||||||
implementation catalog.slf4j.api
|
implementation catalog.slf4j.api
|
||||||
implementation catalog.netty.buffer
|
implementation catalog.netty.buffer
|
||||||
|
implementation catalog.netty.handler
|
||||||
|
implementation catalog.netty.transport
|
||||||
|
implementation catalog.netty.common
|
||||||
implementation catalog.netty.codec.http
|
implementation catalog.netty.codec.http
|
||||||
|
|
||||||
testRuntimeOnly catalog.logback.classic
|
testRuntimeOnly catalog.logback.classic
|
||||||
|
@@ -4,7 +4,9 @@ import io.netty.bootstrap.Bootstrap
|
|||||||
import io.netty.buffer.ByteBuf
|
import io.netty.buffer.ByteBuf
|
||||||
import io.netty.buffer.Unpooled
|
import io.netty.buffer.Unpooled
|
||||||
import io.netty.channel.Channel
|
import io.netty.channel.Channel
|
||||||
|
import io.netty.channel.ChannelHandler
|
||||||
import io.netty.channel.ChannelHandlerContext
|
import io.netty.channel.ChannelHandlerContext
|
||||||
|
import io.netty.channel.ChannelInboundHandlerAdapter
|
||||||
import io.netty.channel.ChannelOption
|
import io.netty.channel.ChannelOption
|
||||||
import io.netty.channel.ChannelPipeline
|
import io.netty.channel.ChannelPipeline
|
||||||
import io.netty.channel.SimpleChannelInboundHandler
|
import io.netty.channel.SimpleChannelInboundHandler
|
||||||
@@ -28,13 +30,18 @@ import io.netty.handler.codec.http.HttpVersion
|
|||||||
import io.netty.handler.ssl.SslContext
|
import io.netty.handler.ssl.SslContext
|
||||||
import io.netty.handler.ssl.SslContextBuilder
|
import io.netty.handler.ssl.SslContextBuilder
|
||||||
import io.netty.handler.stream.ChunkedWriteHandler
|
import io.netty.handler.stream.ChunkedWriteHandler
|
||||||
|
import io.netty.handler.timeout.IdleState
|
||||||
|
import io.netty.handler.timeout.IdleStateEvent
|
||||||
|
import io.netty.handler.timeout.IdleStateHandler
|
||||||
import io.netty.util.concurrent.Future
|
import io.netty.util.concurrent.Future
|
||||||
import io.netty.util.concurrent.GenericFutureListener
|
import io.netty.util.concurrent.GenericFutureListener
|
||||||
|
import net.woggioni.rbcs.api.CacheValueMetadata
|
||||||
import net.woggioni.rbcs.client.impl.Parser
|
import net.woggioni.rbcs.client.impl.Parser
|
||||||
import net.woggioni.rbcs.common.Xml
|
import net.woggioni.rbcs.common.Xml
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
import net.woggioni.rbcs.common.debug
|
import net.woggioni.rbcs.common.debug
|
||||||
import net.woggioni.rbcs.common.trace
|
import net.woggioni.rbcs.common.trace
|
||||||
|
import java.io.IOException
|
||||||
import java.net.InetSocketAddress
|
import java.net.InetSocketAddress
|
||||||
import java.net.URI
|
import java.net.URI
|
||||||
import java.nio.file.Files
|
import java.nio.file.Files
|
||||||
@@ -44,14 +51,19 @@ import java.security.cert.X509Certificate
|
|||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
import java.util.Base64
|
import java.util.Base64
|
||||||
import java.util.concurrent.CompletableFuture
|
import java.util.concurrent.CompletableFuture
|
||||||
|
import java.util.concurrent.TimeUnit
|
||||||
|
import java.util.concurrent.TimeoutException
|
||||||
import java.util.concurrent.atomic.AtomicInteger
|
import java.util.concurrent.atomic.AtomicInteger
|
||||||
|
import kotlin.random.Random
|
||||||
import io.netty.util.concurrent.Future as NettyFuture
|
import io.netty.util.concurrent.Future as NettyFuture
|
||||||
|
|
||||||
|
|
||||||
class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoCloseable {
|
class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoCloseable {
|
||||||
|
companion object{
|
||||||
|
private val log = createLogger<RemoteBuildCacheClient>()
|
||||||
|
}
|
||||||
|
|
||||||
private val group: NioEventLoopGroup
|
private val group: NioEventLoopGroup
|
||||||
private var sslContext: SslContext
|
private var sslContext: SslContext
|
||||||
private val log = contextLogger()
|
|
||||||
private val pool: ChannelPool
|
private val pool: ChannelPool
|
||||||
|
|
||||||
data class Configuration(
|
data class Configuration(
|
||||||
@@ -72,11 +84,21 @@ class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoC
|
|||||||
val exp: Double
|
val exp: Double
|
||||||
)
|
)
|
||||||
|
|
||||||
|
class Connection(
|
||||||
|
val readTimeout: Duration,
|
||||||
|
val writeTimeout: Duration,
|
||||||
|
val idleTimeout: Duration,
|
||||||
|
val readIdleTimeout: Duration,
|
||||||
|
val writeIdleTimeout: Duration
|
||||||
|
)
|
||||||
|
|
||||||
data class Profile(
|
data class Profile(
|
||||||
val serverURI: URI,
|
val serverURI: URI,
|
||||||
|
val connection: Connection?,
|
||||||
val authentication: Authentication?,
|
val authentication: Authentication?,
|
||||||
val connectionTimeout: Duration?,
|
val connectionTimeout: Duration?,
|
||||||
val maxConnections: Int,
|
val maxConnections: Int,
|
||||||
|
val compressionEnabled: Boolean,
|
||||||
val retryPolicy: RetryPolicy?,
|
val retryPolicy: RetryPolicy?,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -141,18 +163,50 @@ class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoC
|
|||||||
}
|
}
|
||||||
|
|
||||||
override fun channelCreated(ch: Channel) {
|
override fun channelCreated(ch: Channel) {
|
||||||
val connectionId = connectionCount.getAndIncrement()
|
val connectionId = connectionCount.incrementAndGet()
|
||||||
log.debug {
|
log.debug {
|
||||||
"Created connection $connectionId, total number of active connections: $connectionId"
|
"Created connection ${ch.id().asShortText()}, total number of active connections: $connectionId"
|
||||||
}
|
}
|
||||||
ch.closeFuture().addListener {
|
ch.closeFuture().addListener {
|
||||||
val activeConnections = connectionCount.decrementAndGet()
|
val activeConnections = connectionCount.decrementAndGet()
|
||||||
log.debug {
|
log.debug {
|
||||||
"Closed connection $connectionId, total number of active connections: $activeConnections"
|
"Closed connection ${
|
||||||
|
ch.id().asShortText()
|
||||||
|
}, total number of active connections: $activeConnections"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
val pipeline: ChannelPipeline = ch.pipeline()
|
val pipeline: ChannelPipeline = ch.pipeline()
|
||||||
|
|
||||||
|
profile.connection?.also { conn ->
|
||||||
|
val readTimeout = conn.readTimeout.toMillis()
|
||||||
|
val writeTimeout = conn.writeTimeout.toMillis()
|
||||||
|
if (readTimeout > 0 || writeTimeout > 0) {
|
||||||
|
pipeline.addLast(
|
||||||
|
IdleStateHandler(
|
||||||
|
false,
|
||||||
|
readTimeout,
|
||||||
|
writeTimeout,
|
||||||
|
0,
|
||||||
|
TimeUnit.MILLISECONDS
|
||||||
|
)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
val readIdleTimeout = conn.readIdleTimeout.toMillis()
|
||||||
|
val writeIdleTimeout = conn.writeIdleTimeout.toMillis()
|
||||||
|
val idleTimeout = conn.idleTimeout.toMillis()
|
||||||
|
if (readIdleTimeout > 0 || writeIdleTimeout > 0 || idleTimeout > 0) {
|
||||||
|
pipeline.addLast(
|
||||||
|
IdleStateHandler(
|
||||||
|
true,
|
||||||
|
readIdleTimeout,
|
||||||
|
writeIdleTimeout,
|
||||||
|
idleTimeout,
|
||||||
|
TimeUnit.MILLISECONDS
|
||||||
|
)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Add SSL handler if needed
|
// Add SSL handler if needed
|
||||||
if ("https".equals(scheme, ignoreCase = true)) {
|
if ("https".equals(scheme, ignoreCase = true)) {
|
||||||
pipeline.addLast("ssl", sslContext.newHandler(ch.alloc(), host, port))
|
pipeline.addLast("ssl", sslContext.newHandler(ch.alloc(), host, port))
|
||||||
@@ -160,7 +214,9 @@ class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoC
|
|||||||
|
|
||||||
// HTTP handlers
|
// HTTP handlers
|
||||||
pipeline.addLast("codec", HttpClientCodec())
|
pipeline.addLast("codec", HttpClientCodec())
|
||||||
pipeline.addLast("decompressor", HttpContentDecompressor())
|
if(profile.compressionEnabled) {
|
||||||
|
pipeline.addLast("decompressor", HttpContentDecompressor())
|
||||||
|
}
|
||||||
pipeline.addLast("aggregator", HttpObjectAggregator(134217728))
|
pipeline.addLast("aggregator", HttpObjectAggregator(134217728))
|
||||||
pipeline.addLast("chunked", ChunkedWriteHandler())
|
pipeline.addLast("chunked", ChunkedWriteHandler())
|
||||||
}
|
}
|
||||||
@@ -206,6 +262,7 @@ class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoC
|
|||||||
retryPolicy.initialDelayMillis.toDouble(),
|
retryPolicy.initialDelayMillis.toDouble(),
|
||||||
retryPolicy.exp,
|
retryPolicy.exp,
|
||||||
outcomeHandler,
|
outcomeHandler,
|
||||||
|
Random.Default,
|
||||||
operation
|
operation
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
@@ -253,9 +310,13 @@ class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoC
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fun put(key: String, content: ByteArray): CompletableFuture<Unit> {
|
fun put(key: String, content: ByteArray, metadata: CacheValueMetadata): CompletableFuture<Unit> {
|
||||||
return executeWithRetry {
|
return executeWithRetry {
|
||||||
sendRequest(profile.serverURI.resolve(key), HttpMethod.PUT, content)
|
val extraHeaders = sequenceOf(
|
||||||
|
metadata.mimeType?.let { HttpHeaderNames.CONTENT_TYPE to it },
|
||||||
|
metadata.contentDisposition?.let { HttpHeaderNames.CONTENT_DISPOSITION to it }
|
||||||
|
).filterNotNull()
|
||||||
|
sendRequest(profile.serverURI.resolve(key), HttpMethod.PUT, content, extraHeaders.asIterable())
|
||||||
}.thenApply {
|
}.thenApply {
|
||||||
val status = it.status()
|
val status = it.status()
|
||||||
if (it.status() != HttpResponseStatus.CREATED && it.status() != HttpResponseStatus.OK) {
|
if (it.status() != HttpResponseStatus.CREATED && it.status() != HttpResponseStatus.OK) {
|
||||||
@@ -264,35 +325,83 @@ class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoC
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun sendRequest(uri: URI, method: HttpMethod, body: ByteArray?): CompletableFuture<FullHttpResponse> {
|
private fun sendRequest(
|
||||||
|
uri: URI,
|
||||||
|
method: HttpMethod,
|
||||||
|
body: ByteArray?,
|
||||||
|
extraHeaders: Iterable<Pair<CharSequence, CharSequence>>? = null
|
||||||
|
): CompletableFuture<FullHttpResponse> {
|
||||||
val responseFuture = CompletableFuture<FullHttpResponse>()
|
val responseFuture = CompletableFuture<FullHttpResponse>()
|
||||||
// Custom handler for processing responses
|
// Custom handler for processing responses
|
||||||
|
|
||||||
pool.acquire().addListener(object : GenericFutureListener<NettyFuture<Channel>> {
|
pool.acquire().addListener(object : GenericFutureListener<NettyFuture<Channel>> {
|
||||||
|
private val handlers = mutableListOf<ChannelHandler>()
|
||||||
|
|
||||||
|
fun cleanup(channel: Channel, pipeline: ChannelPipeline) {
|
||||||
|
handlers.forEach(pipeline::remove)
|
||||||
|
pool.release(channel)
|
||||||
|
}
|
||||||
|
|
||||||
override fun operationComplete(channelFuture: Future<Channel>) {
|
override fun operationComplete(channelFuture: Future<Channel>) {
|
||||||
if (channelFuture.isSuccess) {
|
if (channelFuture.isSuccess) {
|
||||||
val channel = channelFuture.now
|
val channel = channelFuture.now
|
||||||
val pipeline = channel.pipeline()
|
val pipeline = channel.pipeline()
|
||||||
channel.pipeline().addLast("handler", object : SimpleChannelInboundHandler<FullHttpResponse>() {
|
val timeoutHandler = object : ChannelInboundHandlerAdapter() {
|
||||||
|
override fun userEventTriggered(ctx: ChannelHandlerContext, evt: Any) {
|
||||||
|
if (evt is IdleStateEvent) {
|
||||||
|
val te = when (evt.state()) {
|
||||||
|
IdleState.READER_IDLE -> TimeoutException(
|
||||||
|
"Read timeout",
|
||||||
|
)
|
||||||
|
|
||||||
|
IdleState.WRITER_IDLE -> TimeoutException("Write timeout")
|
||||||
|
|
||||||
|
IdleState.ALL_IDLE -> TimeoutException("Idle timeout")
|
||||||
|
null -> throw IllegalStateException("This should never happen")
|
||||||
|
}
|
||||||
|
responseFuture.completeExceptionally(te)
|
||||||
|
ctx.close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
val closeListener = GenericFutureListener<Future<Void>> {
|
||||||
|
responseFuture.completeExceptionally(IOException("The remote server closed the connection"))
|
||||||
|
pool.release(channel)
|
||||||
|
}
|
||||||
|
|
||||||
|
val responseHandler = object : SimpleChannelInboundHandler<FullHttpResponse>() {
|
||||||
override fun channelRead0(
|
override fun channelRead0(
|
||||||
ctx: ChannelHandlerContext,
|
ctx: ChannelHandlerContext,
|
||||||
response: FullHttpResponse
|
response: FullHttpResponse
|
||||||
) {
|
) {
|
||||||
pipeline.removeLast()
|
channel.closeFuture().removeListener(closeListener)
|
||||||
pool.release(channel)
|
cleanup(channel, pipeline)
|
||||||
responseFuture.complete(response)
|
responseFuture.complete(response)
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
||||||
|
ctx.newPromise()
|
||||||
val ex = when (cause) {
|
val ex = when (cause) {
|
||||||
is DecoderException -> cause.cause
|
is DecoderException -> cause.cause
|
||||||
else -> cause
|
else -> cause
|
||||||
}
|
}
|
||||||
responseFuture.completeExceptionally(ex)
|
responseFuture.completeExceptionally(ex)
|
||||||
ctx.close()
|
ctx.close()
|
||||||
pipeline.removeLast()
|
|
||||||
pool.release(channel)
|
|
||||||
}
|
}
|
||||||
})
|
|
||||||
|
override fun channelInactive(ctx: ChannelHandlerContext) {
|
||||||
|
pool.release(channel)
|
||||||
|
responseFuture.completeExceptionally(IOException("The remote server closed the connection"))
|
||||||
|
super.channelInactive(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (handler in arrayOf(timeoutHandler, responseHandler)) {
|
||||||
|
handlers.add(handler)
|
||||||
|
}
|
||||||
|
pipeline.addLast(timeoutHandler, responseHandler)
|
||||||
|
channel.closeFuture().addListener(closeListener)
|
||||||
|
|
||||||
|
|
||||||
// Prepare the HTTP request
|
// Prepare the HTTP request
|
||||||
val request: FullHttpRequest = let {
|
val request: FullHttpRequest = let {
|
||||||
val content: ByteBuf? = body?.takeIf(ByteArray::isNotEmpty)?.let(Unpooled::wrappedBuffer)
|
val content: ByteBuf? = body?.takeIf(ByteArray::isNotEmpty)?.let(Unpooled::wrappedBuffer)
|
||||||
@@ -304,15 +413,19 @@ class RemoteBuildCacheClient(private val profile: Configuration.Profile) : AutoC
|
|||||||
).apply {
|
).apply {
|
||||||
headers().apply {
|
headers().apply {
|
||||||
if (content != null) {
|
if (content != null) {
|
||||||
set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_OCTET_STREAM)
|
|
||||||
set(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes())
|
set(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes())
|
||||||
}
|
}
|
||||||
set(HttpHeaderNames.HOST, profile.serverURI.host)
|
set(HttpHeaderNames.HOST, profile.serverURI.host)
|
||||||
set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE)
|
set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE)
|
||||||
set(
|
if(profile.compressionEnabled) {
|
||||||
HttpHeaderNames.ACCEPT_ENCODING,
|
set(
|
||||||
HttpHeaderValues.GZIP.toString() + "," + HttpHeaderValues.DEFLATE.toString()
|
HttpHeaderNames.ACCEPT_ENCODING,
|
||||||
)
|
HttpHeaderValues.GZIP.toString() + "," + HttpHeaderValues.DEFLATE.toString()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
extraHeaders?.forEach { (k, v) ->
|
||||||
|
add(k, v)
|
||||||
|
}
|
||||||
// Add basic auth if configured
|
// Add basic auth if configured
|
||||||
(profile.authentication as? Configuration.Authentication.BasicAuthenticationCredentials)?.let { credentials ->
|
(profile.authentication as? Configuration.Authentication.BasicAuthenticationCredentials)?.let { credentials ->
|
||||||
val auth = "${credentials.username}:${credentials.password}"
|
val auth = "${credentials.username}:${credentials.password}"
|
||||||
|
@@ -12,6 +12,7 @@ import java.security.KeyStore
|
|||||||
import java.security.PrivateKey
|
import java.security.PrivateKey
|
||||||
import java.security.cert.X509Certificate
|
import java.security.cert.X509Certificate
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
|
import java.time.temporal.ChronoUnit
|
||||||
|
|
||||||
object Parser {
|
object Parser {
|
||||||
|
|
||||||
@@ -29,6 +30,7 @@ object Parser {
|
|||||||
?: throw ConfigurationException("base-url attribute is required")
|
?: throw ConfigurationException("base-url attribute is required")
|
||||||
var authentication: RemoteBuildCacheClient.Configuration.Authentication? = null
|
var authentication: RemoteBuildCacheClient.Configuration.Authentication? = null
|
||||||
var retryPolicy: RemoteBuildCacheClient.Configuration.RetryPolicy? = null
|
var retryPolicy: RemoteBuildCacheClient.Configuration.RetryPolicy? = null
|
||||||
|
var connection : RemoteBuildCacheClient.Configuration.Connection? = null
|
||||||
for (gchild in child.asIterable()) {
|
for (gchild in child.asIterable()) {
|
||||||
when (gchild.localName) {
|
when (gchild.localName) {
|
||||||
"tls-client-auth" -> {
|
"tls-client-auth" -> {
|
||||||
@@ -86,6 +88,26 @@ object Parser {
|
|||||||
exp.toDouble()
|
exp.toDouble()
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
"connection" -> {
|
||||||
|
val writeTimeout = gchild.renderAttribute("write-timeout")
|
||||||
|
?.let(Duration::parse) ?: Duration.of(0, ChronoUnit.SECONDS)
|
||||||
|
val readTimeout = gchild.renderAttribute("read-timeout")
|
||||||
|
?.let(Duration::parse) ?: Duration.of(0, ChronoUnit.SECONDS)
|
||||||
|
val idleTimeout = gchild.renderAttribute("idle-timeout")
|
||||||
|
?.let(Duration::parse) ?: Duration.of(30, ChronoUnit.SECONDS)
|
||||||
|
val readIdleTimeout = gchild.renderAttribute("read-idle-timeout")
|
||||||
|
?.let(Duration::parse) ?: Duration.of(60, ChronoUnit.SECONDS)
|
||||||
|
val writeIdleTimeout = gchild.renderAttribute("write-idle-timeout")
|
||||||
|
?.let(Duration::parse) ?: Duration.of(60, ChronoUnit.SECONDS)
|
||||||
|
connection = RemoteBuildCacheClient.Configuration.Connection(
|
||||||
|
readTimeout,
|
||||||
|
writeTimeout,
|
||||||
|
idleTimeout,
|
||||||
|
readIdleTimeout,
|
||||||
|
writeIdleTimeout,
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
val maxConnections = child.renderAttribute("max-connections")
|
val maxConnections = child.renderAttribute("max-connections")
|
||||||
@@ -93,11 +115,17 @@ object Parser {
|
|||||||
?: 50
|
?: 50
|
||||||
val connectionTimeout = child.renderAttribute("connection-timeout")
|
val connectionTimeout = child.renderAttribute("connection-timeout")
|
||||||
?.let(Duration::parse)
|
?.let(Duration::parse)
|
||||||
|
val compressionEnabled = child.renderAttribute("enable-compression")
|
||||||
|
?.let(String::toBoolean)
|
||||||
|
?: true
|
||||||
|
|
||||||
profiles[name] = RemoteBuildCacheClient.Configuration.Profile(
|
profiles[name] = RemoteBuildCacheClient.Configuration.Profile(
|
||||||
uri,
|
uri,
|
||||||
|
connection,
|
||||||
authentication,
|
authentication,
|
||||||
connectionTimeout,
|
connectionTimeout,
|
||||||
maxConnections,
|
maxConnections,
|
||||||
|
compressionEnabled,
|
||||||
retryPolicy
|
retryPolicy
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
@@ -3,6 +3,8 @@ package net.woggioni.rbcs.client
|
|||||||
import io.netty.util.concurrent.EventExecutorGroup
|
import io.netty.util.concurrent.EventExecutorGroup
|
||||||
import java.util.concurrent.CompletableFuture
|
import java.util.concurrent.CompletableFuture
|
||||||
import java.util.concurrent.TimeUnit
|
import java.util.concurrent.TimeUnit
|
||||||
|
import kotlin.math.pow
|
||||||
|
import kotlin.random.Random
|
||||||
|
|
||||||
sealed class OperationOutcome<T> {
|
sealed class OperationOutcome<T> {
|
||||||
class Success<T>(val result: T) : OperationOutcome<T>()
|
class Success<T>(val result: T) : OperationOutcome<T>()
|
||||||
@@ -24,8 +26,10 @@ fun <T> executeWithRetry(
|
|||||||
initialDelay: Double,
|
initialDelay: Double,
|
||||||
exp: Double,
|
exp: Double,
|
||||||
outcomeHandler: OutcomeHandler<T>,
|
outcomeHandler: OutcomeHandler<T>,
|
||||||
|
randomizer : Random?,
|
||||||
cb: () -> CompletableFuture<T>
|
cb: () -> CompletableFuture<T>
|
||||||
): CompletableFuture<T> {
|
): CompletableFuture<T> {
|
||||||
|
|
||||||
val finalResult = cb()
|
val finalResult = cb()
|
||||||
var future = finalResult
|
var future = finalResult
|
||||||
var shortCircuit = false
|
var shortCircuit = false
|
||||||
@@ -46,7 +50,7 @@ fun <T> executeWithRetry(
|
|||||||
is OutcomeHandlerResult.Retry -> {
|
is OutcomeHandlerResult.Retry -> {
|
||||||
val res = CompletableFuture<T>()
|
val res = CompletableFuture<T>()
|
||||||
val delay = run {
|
val delay = run {
|
||||||
val scheduledDelay = (initialDelay * Math.pow(exp, i.toDouble())).toLong()
|
val scheduledDelay = (initialDelay * exp.pow(i.toDouble()) * (1.0 + (randomizer?.nextDouble(-0.5, 0.5) ?: 0.0))).toLong()
|
||||||
outcomeHandlerResult.suggestedDelayMillis?.coerceAtMost(scheduledDelay) ?: scheduledDelay
|
outcomeHandlerResult.suggestedDelayMillis?.coerceAtMost(scheduledDelay) ?: scheduledDelay
|
||||||
}
|
}
|
||||||
eventExecutorGroup.schedule({
|
eventExecutorGroup.schedule({
|
||||||
|
@@ -19,12 +19,22 @@
|
|||||||
<xs:element name="basic-auth" type="rbcs-client:basicAuthType"/>
|
<xs:element name="basic-auth" type="rbcs-client:basicAuthType"/>
|
||||||
<xs:element name="tls-client-auth" type="rbcs-client:tlsClientAuthType"/>
|
<xs:element name="tls-client-auth" type="rbcs-client:tlsClientAuthType"/>
|
||||||
</xs:choice>
|
</xs:choice>
|
||||||
|
<xs:element name="connection" type="rbcs-client:connectionType" minOccurs="0" />
|
||||||
<xs:element name="retry-policy" type="rbcs-client:retryType" minOccurs="0"/>
|
<xs:element name="retry-policy" type="rbcs-client:retryType" minOccurs="0"/>
|
||||||
</xs:sequence>
|
</xs:sequence>
|
||||||
<xs:attribute name="name" type="xs:token" use="required"/>
|
<xs:attribute name="name" type="xs:token" use="required"/>
|
||||||
<xs:attribute name="base-url" type="xs:anyURI" use="required"/>
|
<xs:attribute name="base-url" type="xs:anyURI" use="required"/>
|
||||||
<xs:attribute name="max-connections" type="xs:positiveInteger" default="50"/>
|
<xs:attribute name="max-connections" type="xs:positiveInteger" default="50"/>
|
||||||
<xs:attribute name="connection-timeout" type="xs:duration"/>
|
<xs:attribute name="connection-timeout" type="xs:duration"/>
|
||||||
|
<xs:attribute name="enable-compression" type="xs:boolean" default="true"/>
|
||||||
|
</xs:complexType>
|
||||||
|
|
||||||
|
<xs:complexType name="connectionType">
|
||||||
|
<xs:attribute name="read-timeout" type="xs:duration" use="optional" default="PT0S"/>
|
||||||
|
<xs:attribute name="write-timeout" type="xs:duration" use="optional" default="PT0S"/>
|
||||||
|
<xs:attribute name="idle-timeout" type="xs:duration" use="optional" default="PT30S"/>
|
||||||
|
<xs:attribute name="read-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
|
||||||
|
<xs:attribute name="write-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
|
||||||
</xs:complexType>
|
</xs:complexType>
|
||||||
|
|
||||||
<xs:complexType name="noAuthType"/>
|
<xs:complexType name="noAuthType"/>
|
||||||
|
@@ -89,7 +89,7 @@ class RetryTest {
|
|||||||
val random = Random(testArgs.seed)
|
val random = Random(testArgs.seed)
|
||||||
|
|
||||||
val future =
|
val future =
|
||||||
executeWithRetry(executor, testArgs.maxAttempt, testArgs.initialDelay, testArgs.exp, outcomeHandler) {
|
executeWithRetry(executor, testArgs.maxAttempt, testArgs.initialDelay, testArgs.exp, outcomeHandler, null) {
|
||||||
val now = System.nanoTime()
|
val now = System.nanoTime()
|
||||||
val result = CompletableFuture<Int>()
|
val result = CompletableFuture<Int>()
|
||||||
executor.submit {
|
executor.submit {
|
||||||
@@ -129,7 +129,7 @@ class RetryTest {
|
|||||||
previousAttempt.first + testArgs.initialDelay * Math.pow(testArgs.exp, index.toDouble()) * 1e6
|
previousAttempt.first + testArgs.initialDelay * Math.pow(testArgs.exp, index.toDouble()) * 1e6
|
||||||
val actualTimestamp = timestamp
|
val actualTimestamp = timestamp
|
||||||
val err = Math.abs(expectedTimestamp - actualTimestamp) / expectedTimestamp
|
val err = Math.abs(expectedTimestamp - actualTimestamp) / expectedTimestamp
|
||||||
Assertions.assertTrue(err < 1e-3)
|
Assertions.assertTrue(err < 1e-2)
|
||||||
}
|
}
|
||||||
if (index == attempts.size - 1 && index < testArgs.maxAttempt - 1) {
|
if (index == attempts.size - 1 && index < testArgs.maxAttempt - 1) {
|
||||||
/*
|
/*
|
||||||
|
@@ -5,6 +5,7 @@ module net.woggioni.rbcs.common {
|
|||||||
requires kotlin.stdlib;
|
requires kotlin.stdlib;
|
||||||
requires net.woggioni.jwo;
|
requires net.woggioni.jwo;
|
||||||
requires io.netty.buffer;
|
requires io.netty.buffer;
|
||||||
|
requires io.netty.transport;
|
||||||
|
|
||||||
provides java.net.spi.URLStreamHandlerProvider with net.woggioni.rbcs.common.RbcsUrlStreamHandlerFactory;
|
provides java.net.spi.URLStreamHandlerProvider with net.woggioni.rbcs.common.RbcsUrlStreamHandlerFactory;
|
||||||
exports net.woggioni.rbcs.common;
|
exports net.woggioni.rbcs.common;
|
||||||
|
15
rbcs-common/src/main/kotlin/net/woggioni/rbcs/common/BB.kt
Normal file
15
rbcs-common/src/main/kotlin/net/woggioni/rbcs/common/BB.kt
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
package net.woggioni.rbcs.common
|
||||||
|
|
||||||
|
import io.netty.buffer.ByteBuf
|
||||||
|
import io.netty.buffer.ByteBufAllocator
|
||||||
|
import io.netty.buffer.CompositeByteBuf
|
||||||
|
|
||||||
|
fun extractChunk(buf: CompositeByteBuf, alloc: ByteBufAllocator): ByteBuf {
|
||||||
|
val chunk = alloc.compositeBuffer()
|
||||||
|
for (component in buf.decompose(0, buf.readableBytes())) {
|
||||||
|
chunk.addComponent(true, component.retain())
|
||||||
|
}
|
||||||
|
buf.removeComponents(0, buf.numComponents())
|
||||||
|
buf.clear()
|
||||||
|
return chunk
|
||||||
|
}
|
@@ -1,90 +1,173 @@
|
|||||||
package net.woggioni.rbcs.common
|
package net.woggioni.rbcs.common
|
||||||
|
|
||||||
|
import io.netty.channel.Channel
|
||||||
|
import io.netty.channel.ChannelHandlerContext
|
||||||
import org.slf4j.Logger
|
import org.slf4j.Logger
|
||||||
import org.slf4j.LoggerFactory
|
import org.slf4j.LoggerFactory
|
||||||
|
import org.slf4j.MDC
|
||||||
import org.slf4j.event.Level
|
import org.slf4j.event.Level
|
||||||
|
import org.slf4j.spi.LoggingEventBuilder
|
||||||
import java.nio.file.Files
|
import java.nio.file.Files
|
||||||
import java.nio.file.Path
|
import java.nio.file.Path
|
||||||
import java.util.logging.LogManager
|
import java.util.logging.LogManager
|
||||||
|
|
||||||
inline fun <reified T> T.contextLogger() = LoggerFactory.getLogger(T::class.java)
|
inline fun <reified T> T.contextLogger() = LoggerFactory.getLogger(T::class.java)
|
||||||
|
inline fun <reified T> createLogger() = LoggerFactory.getLogger(T::class.java)
|
||||||
|
|
||||||
inline fun Logger.traceParam(messageBuilder : () -> Pair<String, Array<Any>>) {
|
inline fun Logger.traceParam(messageBuilder: () -> Pair<String, Array<Any>>) {
|
||||||
if(isTraceEnabled) {
|
if (isTraceEnabled) {
|
||||||
val (format, params) = messageBuilder()
|
val (format, params) = messageBuilder()
|
||||||
trace(format, params)
|
trace(format, params)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.debugParam(messageBuilder : () -> Pair<String, Array<Any>>) {
|
inline fun Logger.debugParam(messageBuilder: () -> Pair<String, Array<Any>>) {
|
||||||
if(isDebugEnabled) {
|
if (isDebugEnabled) {
|
||||||
val (format, params) = messageBuilder()
|
val (format, params) = messageBuilder()
|
||||||
info(format, params)
|
info(format, params)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.infoParam(messageBuilder : () -> Pair<String, Array<Any>>) {
|
inline fun Logger.infoParam(messageBuilder: () -> Pair<String, Array<Any>>) {
|
||||||
if(isInfoEnabled) {
|
if (isInfoEnabled) {
|
||||||
val (format, params) = messageBuilder()
|
val (format, params) = messageBuilder()
|
||||||
info(format, params)
|
info(format, params)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.warnParam(messageBuilder : () -> Pair<String, Array<Any>>) {
|
inline fun Logger.warnParam(messageBuilder: () -> Pair<String, Array<Any>>) {
|
||||||
if(isWarnEnabled) {
|
if (isWarnEnabled) {
|
||||||
val (format, params) = messageBuilder()
|
val (format, params) = messageBuilder()
|
||||||
warn(format, params)
|
warn(format, params)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.errorParam(messageBuilder : () -> Pair<String, Array<Any>>) {
|
inline fun Logger.errorParam(messageBuilder: () -> Pair<String, Array<Any>>) {
|
||||||
if(isErrorEnabled) {
|
if (isErrorEnabled) {
|
||||||
val (format, params) = messageBuilder()
|
val (format, params) = messageBuilder()
|
||||||
error(format, params)
|
error(format, params)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
inline fun log(log : Logger,
|
inline fun log(
|
||||||
filter : Logger.() -> Boolean,
|
log: Logger,
|
||||||
loggerMethod : Logger.(String) -> Unit, messageBuilder : () -> String) {
|
filter: Logger.() -> Boolean,
|
||||||
if(log.filter()) {
|
loggerMethod: Logger.(String) -> Unit, messageBuilder: () -> String
|
||||||
|
) {
|
||||||
|
if (log.filter()) {
|
||||||
log.loggerMethod(messageBuilder())
|
log.loggerMethod(messageBuilder())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.log(level : Level, messageBuilder : () -> String) {
|
fun withMDC(params: Array<Pair<String, String>>, cb: () -> Unit) {
|
||||||
if(isEnabledForLevel(level)) {
|
object : AutoCloseable {
|
||||||
|
override fun close() {
|
||||||
|
for ((key, _) in params) MDC.remove(key)
|
||||||
|
}
|
||||||
|
}.use {
|
||||||
|
for ((key, value) in params) MDC.put(key, value)
|
||||||
|
cb()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.log(level: Level, channel: Channel, crossinline messageBuilder: (LoggingEventBuilder) -> Unit ) {
|
||||||
|
if (isEnabledForLevel(level)) {
|
||||||
|
val params = arrayOf<Pair<String, String>>(
|
||||||
|
"channel-id-short" to channel.id().asShortText(),
|
||||||
|
"channel-id-long" to channel.id().asLongText(),
|
||||||
|
"remote-address" to channel.remoteAddress().toString(),
|
||||||
|
"local-address" to channel.localAddress().toString(),
|
||||||
|
)
|
||||||
|
withMDC(params) {
|
||||||
|
val builder = makeLoggingEventBuilder(level)
|
||||||
|
// for ((key, value) in params) {
|
||||||
|
// builder.addKeyValue(key, value)
|
||||||
|
// }
|
||||||
|
messageBuilder(builder)
|
||||||
|
builder.log()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
inline fun Logger.log(level: Level, channel: Channel, crossinline messageBuilder: () -> String) {
|
||||||
|
log(level, channel) { builder ->
|
||||||
|
builder.setMessage(messageBuilder())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.trace(ch: Channel, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.TRACE, ch, messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.debug(ch: Channel, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.DEBUG, ch, messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.info(ch: Channel, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.INFO, ch, messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.warn(ch: Channel, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.WARN, ch, messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.error(ch: Channel, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.ERROR, ch, messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.trace(ctx: ChannelHandlerContext, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.TRACE, ctx.channel(), messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.debug(ctx: ChannelHandlerContext, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.DEBUG, ctx.channel(), messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.info(ctx: ChannelHandlerContext, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.INFO, ctx.channel(), messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.warn(ctx: ChannelHandlerContext, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.WARN, ctx.channel(), messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
inline fun Logger.error(ctx: ChannelHandlerContext, crossinline messageBuilder: () -> String) {
|
||||||
|
log(Level.ERROR, ctx.channel(), messageBuilder)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
inline fun Logger.log(level: Level, messageBuilder: () -> String) {
|
||||||
|
if (isEnabledForLevel(level)) {
|
||||||
makeLoggingEventBuilder(level).log(messageBuilder())
|
makeLoggingEventBuilder(level).log(messageBuilder())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.trace(messageBuilder : () -> String) {
|
inline fun Logger.trace(messageBuilder: () -> String) {
|
||||||
if(isTraceEnabled) {
|
if (isTraceEnabled) {
|
||||||
trace(messageBuilder())
|
trace(messageBuilder())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.debug(messageBuilder : () -> String) {
|
inline fun Logger.debug(messageBuilder: () -> String) {
|
||||||
if(isDebugEnabled) {
|
if (isDebugEnabled) {
|
||||||
debug(messageBuilder())
|
debug(messageBuilder())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.info(messageBuilder : () -> String) {
|
inline fun Logger.info(messageBuilder: () -> String) {
|
||||||
if(isInfoEnabled) {
|
if (isInfoEnabled) {
|
||||||
info(messageBuilder())
|
info(messageBuilder())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.warn(messageBuilder : () -> String) {
|
inline fun Logger.warn(messageBuilder: () -> String) {
|
||||||
if(isWarnEnabled) {
|
if (isWarnEnabled) {
|
||||||
warn(messageBuilder())
|
warn(messageBuilder())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline fun Logger.error(messageBuilder : () -> String) {
|
inline fun Logger.error(messageBuilder: () -> String) {
|
||||||
if(isErrorEnabled) {
|
if (isErrorEnabled) {
|
||||||
error(messageBuilder())
|
error(messageBuilder())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -94,9 +177,9 @@ class LoggingConfig {
|
|||||||
|
|
||||||
init {
|
init {
|
||||||
val logManager = LogManager.getLogManager()
|
val logManager = LogManager.getLogManager()
|
||||||
System.getProperty("log.config.source")?.let withSource@ { source ->
|
System.getProperty("log.config.source")?.let withSource@{ source ->
|
||||||
val urls = LoggingConfig::class.java.classLoader.getResources(source)
|
val urls = LoggingConfig::class.java.classLoader.getResources(source)
|
||||||
while(urls.hasMoreElements()) {
|
while (urls.hasMoreElements()) {
|
||||||
val url = urls.nextElement()
|
val url = urls.nextElement()
|
||||||
url.openStream().use { inputStream ->
|
url.openStream().use { inputStream ->
|
||||||
logManager.readConfiguration(inputStream)
|
logManager.readConfiguration(inputStream)
|
||||||
|
@@ -12,6 +12,24 @@ object RBCS {
|
|||||||
const val RBCS_PREFIX: String = "rbcs"
|
const val RBCS_PREFIX: String = "rbcs"
|
||||||
const val XML_SCHEMA_NAMESPACE_URI = "http://www.w3.org/2001/XMLSchema-instance"
|
const val XML_SCHEMA_NAMESPACE_URI = "http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
|
||||||
|
fun ByteArray.toInt(index : Int = 0) : Long {
|
||||||
|
if(index + 4 > size) throw IllegalArgumentException("Not enough bytes to decode a 32 bits integer")
|
||||||
|
var value : Long = 0
|
||||||
|
for (b in index until index + 4) {
|
||||||
|
value = (value shl 8) + (get(b).toInt() and 0xFF)
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
fun ByteArray.toLong(index : Int = 0) : Long {
|
||||||
|
if(index + 8 > size) throw IllegalArgumentException("Not enough bytes to decode a 64 bits long integer")
|
||||||
|
var value : Long = 0
|
||||||
|
for (b in index until index + 8) {
|
||||||
|
value = (value shl 8) + (get(b).toInt() and 0xFF)
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
fun digest(
|
fun digest(
|
||||||
data: ByteArray,
|
data: ByteArray,
|
||||||
md: MessageDigest = MessageDigest.getInstance("MD5")
|
md: MessageDigest = MessageDigest.getInstance("MD5")
|
||||||
@@ -26,4 +44,18 @@ object RBCS {
|
|||||||
): String {
|
): String {
|
||||||
return JWO.bytesToHex(digest(data, md))
|
return JWO.bytesToHex(digest(data, md))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fun processCacheKey(key: String, digestAlgorithm: String?) = digestAlgorithm
|
||||||
|
?.let(MessageDigest::getInstance)
|
||||||
|
?.let { md ->
|
||||||
|
digest(key.toByteArray(), md)
|
||||||
|
} ?: key.toByteArray(Charsets.UTF_8)
|
||||||
|
|
||||||
|
fun Long.toIntOrNull(): Int? {
|
||||||
|
return if (this >= Int.MIN_VALUE && this <= Int.MAX_VALUE) {
|
||||||
|
toInt()
|
||||||
|
} else {
|
||||||
|
null
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
@@ -1,7 +1,6 @@
|
|||||||
package net.woggioni.rbcs.common
|
package net.woggioni.rbcs.common
|
||||||
|
|
||||||
import net.woggioni.jwo.JWO
|
import net.woggioni.jwo.JWO
|
||||||
import org.slf4j.LoggerFactory
|
|
||||||
import org.slf4j.event.Level
|
import org.slf4j.event.Level
|
||||||
import org.w3c.dom.Document
|
import org.w3c.dom.Document
|
||||||
import org.w3c.dom.Element
|
import org.w3c.dom.Element
|
||||||
@@ -79,7 +78,7 @@ class Xml(val doc: Document, val element: Element) {
|
|||||||
class ErrorHandler(private val fileURL: URL) : ErrHandler {
|
class ErrorHandler(private val fileURL: URL) : ErrHandler {
|
||||||
|
|
||||||
companion object {
|
companion object {
|
||||||
private val log = LoggerFactory.getLogger(ErrorHandler::class.java)
|
private val log = createLogger<ErrorHandler>()
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun warning(ex: SAXParseException)= err(ex, Level.WARN)
|
override fun warning(ex: SAXParseException)= err(ex, Level.WARN)
|
||||||
|
@@ -34,6 +34,7 @@ dependencies {
|
|||||||
implementation catalog.jwo
|
implementation catalog.jwo
|
||||||
implementation catalog.slf4j.api
|
implementation catalog.slf4j.api
|
||||||
implementation catalog.netty.common
|
implementation catalog.netty.common
|
||||||
|
implementation catalog.netty.handler
|
||||||
implementation catalog.netty.codec.memcache
|
implementation catalog.netty.codec.memcache
|
||||||
|
|
||||||
bundle catalog.netty.codec.memcache
|
bundle catalog.netty.codec.memcache
|
||||||
|
@@ -11,6 +11,7 @@ module net.woggioni.rbcs.server.memcache {
|
|||||||
requires io.netty.codec.memcache;
|
requires io.netty.codec.memcache;
|
||||||
requires io.netty.common;
|
requires io.netty.common;
|
||||||
requires io.netty.buffer;
|
requires io.netty.buffer;
|
||||||
|
requires io.netty.handler;
|
||||||
requires org.slf4j;
|
requires org.slf4j;
|
||||||
|
|
||||||
provides CacheProvider with net.woggioni.rbcs.server.memcache.MemcacheCacheProvider;
|
provides CacheProvider with net.woggioni.rbcs.server.memcache.MemcacheCacheProvider;
|
||||||
|
@@ -1,23 +0,0 @@
|
|||||||
package net.woggioni.rbcs.server.memcache
|
|
||||||
|
|
||||||
import io.netty.buffer.ByteBuf
|
|
||||||
import net.woggioni.rbcs.api.Cache
|
|
||||||
import net.woggioni.rbcs.server.memcache.client.MemcacheClient
|
|
||||||
import java.nio.channels.ReadableByteChannel
|
|
||||||
import java.util.concurrent.CompletableFuture
|
|
||||||
|
|
||||||
class MemcacheCache(private val cfg : MemcacheCacheConfiguration) : Cache {
|
|
||||||
private val memcacheClient = MemcacheClient(cfg)
|
|
||||||
|
|
||||||
override fun get(key: String): CompletableFuture<ReadableByteChannel?> {
|
|
||||||
return memcacheClient.get(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
override fun put(key: String, content: ByteBuf): CompletableFuture<Void> {
|
|
||||||
return memcacheClient.put(key, content, cfg.maxAge)
|
|
||||||
}
|
|
||||||
|
|
||||||
override fun close() {
|
|
||||||
memcacheClient.close()
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,23 +1,21 @@
|
|||||||
package net.woggioni.rbcs.server.memcache
|
package net.woggioni.rbcs.server.memcache
|
||||||
|
|
||||||
|
import net.woggioni.rbcs.api.CacheHandlerFactory
|
||||||
import net.woggioni.rbcs.api.Configuration
|
import net.woggioni.rbcs.api.Configuration
|
||||||
import net.woggioni.rbcs.common.HostAndPort
|
import net.woggioni.rbcs.common.HostAndPort
|
||||||
|
import net.woggioni.rbcs.server.memcache.client.MemcacheClient
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
|
|
||||||
data class MemcacheCacheConfiguration(
|
data class MemcacheCacheConfiguration(
|
||||||
val servers: List<Server>,
|
val servers: List<Server>,
|
||||||
val maxAge: Duration = Duration.ofDays(1),
|
val maxAge: Duration = Duration.ofDays(1),
|
||||||
val maxSize: Int = 0x100000,
|
|
||||||
val digestAlgorithm: String? = null,
|
val digestAlgorithm: String? = null,
|
||||||
val compressionMode: CompressionMode? = null,
|
val compressionMode: CompressionMode? = null,
|
||||||
|
val compressionLevel: Int,
|
||||||
|
val chunkSize : Int
|
||||||
) : Configuration.Cache {
|
) : Configuration.Cache {
|
||||||
|
|
||||||
enum class CompressionMode {
|
enum class CompressionMode {
|
||||||
/**
|
|
||||||
* Gzip mode
|
|
||||||
*/
|
|
||||||
GZIP,
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Deflate mode
|
* Deflate mode
|
||||||
*/
|
*/
|
||||||
@@ -31,7 +29,14 @@ data class MemcacheCacheConfiguration(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
override fun materialize() = MemcacheCache(this)
|
override fun materialize() = object : CacheHandlerFactory {
|
||||||
|
private val client = MemcacheClient(this@MemcacheCacheConfiguration.servers, chunkSize)
|
||||||
|
override fun close() {
|
||||||
|
client.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun newHandler() = MemcacheCacheHandler(client, digestAlgorithm, compressionMode != null, compressionLevel, chunkSize, maxAge)
|
||||||
|
}
|
||||||
|
|
||||||
override fun getNamespaceURI() = "urn:net.woggioni.rbcs.server.memcache"
|
override fun getNamespaceURI() = "urn:net.woggioni.rbcs.server.memcache"
|
||||||
|
|
||||||
|
@@ -0,0 +1,409 @@
|
|||||||
|
package net.woggioni.rbcs.server.memcache
|
||||||
|
|
||||||
|
import io.netty.buffer.ByteBuf
|
||||||
|
import io.netty.buffer.ByteBufAllocator
|
||||||
|
import io.netty.buffer.CompositeByteBuf
|
||||||
|
import io.netty.channel.ChannelHandlerContext
|
||||||
|
import io.netty.channel.SimpleChannelInboundHandler
|
||||||
|
import io.netty.handler.codec.memcache.DefaultLastMemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.DefaultMemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.LastMemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.MemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheOpcodes
|
||||||
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheResponse
|
||||||
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheResponseStatus
|
||||||
|
import io.netty.handler.codec.memcache.binary.DefaultBinaryMemcacheRequest
|
||||||
|
import net.woggioni.rbcs.api.CacheValueMetadata
|
||||||
|
import net.woggioni.rbcs.api.exception.ContentTooLargeException
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheContent
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheGetRequest
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutRequest
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueNotFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.LastCacheContent
|
||||||
|
import net.woggioni.rbcs.common.ByteBufInputStream
|
||||||
|
import net.woggioni.rbcs.common.ByteBufOutputStream
|
||||||
|
import net.woggioni.rbcs.common.RBCS.processCacheKey
|
||||||
|
import net.woggioni.rbcs.common.RBCS.toIntOrNull
|
||||||
|
import net.woggioni.rbcs.common.createLogger
|
||||||
|
import net.woggioni.rbcs.common.debug
|
||||||
|
import net.woggioni.rbcs.common.extractChunk
|
||||||
|
import net.woggioni.rbcs.common.trace
|
||||||
|
import net.woggioni.rbcs.server.memcache.client.MemcacheClient
|
||||||
|
import net.woggioni.rbcs.server.memcache.client.MemcacheRequestController
|
||||||
|
import net.woggioni.rbcs.server.memcache.client.MemcacheResponseHandler
|
||||||
|
import java.io.ByteArrayOutputStream
|
||||||
|
import java.io.ObjectInputStream
|
||||||
|
import java.io.ObjectOutputStream
|
||||||
|
import java.nio.ByteBuffer
|
||||||
|
import java.nio.channels.Channels
|
||||||
|
import java.nio.channels.FileChannel
|
||||||
|
import java.nio.channels.ReadableByteChannel
|
||||||
|
import java.nio.file.Files
|
||||||
|
import java.nio.file.StandardOpenOption
|
||||||
|
import java.time.Duration
|
||||||
|
import java.time.Instant
|
||||||
|
import java.util.concurrent.CompletableFuture
|
||||||
|
import java.util.zip.Deflater
|
||||||
|
import java.util.zip.DeflaterOutputStream
|
||||||
|
import java.util.zip.InflaterOutputStream
|
||||||
|
import io.netty.channel.Channel as NettyChannel
|
||||||
|
|
||||||
|
class MemcacheCacheHandler(
|
||||||
|
private val client: MemcacheClient,
|
||||||
|
private val digestAlgorithm: String?,
|
||||||
|
private val compressionEnabled: Boolean,
|
||||||
|
private val compressionLevel: Int,
|
||||||
|
private val chunkSize: Int,
|
||||||
|
private val maxAge: Duration
|
||||||
|
) : SimpleChannelInboundHandler<CacheMessage>() {
|
||||||
|
companion object {
|
||||||
|
private val log = createLogger<MemcacheCacheHandler>()
|
||||||
|
|
||||||
|
private fun encodeExpiry(expiry: Duration): Int {
|
||||||
|
val expirySeconds = expiry.toSeconds()
|
||||||
|
return expirySeconds.toInt().takeIf { it.toLong() == expirySeconds }
|
||||||
|
?: Instant.ofEpochSecond(expirySeconds).epochSecond.toInt()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private inner class InProgressGetRequest(
|
||||||
|
private val key: String,
|
||||||
|
private val ctx: ChannelHandlerContext
|
||||||
|
) {
|
||||||
|
private val acc = ctx.alloc().compositeBuffer()
|
||||||
|
private val chunk = ctx.alloc().compositeBuffer()
|
||||||
|
private val outputStream = ByteBufOutputStream(chunk).let {
|
||||||
|
if (compressionEnabled) {
|
||||||
|
InflaterOutputStream(it)
|
||||||
|
} else {
|
||||||
|
it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
private var responseSent = false
|
||||||
|
private var metadataSize: Int? = null
|
||||||
|
|
||||||
|
fun write(buf: ByteBuf) {
|
||||||
|
acc.addComponent(true, buf.retain())
|
||||||
|
if (metadataSize == null && acc.readableBytes() >= Int.SIZE_BYTES) {
|
||||||
|
metadataSize = acc.readInt()
|
||||||
|
}
|
||||||
|
metadataSize
|
||||||
|
?.takeIf { !responseSent }
|
||||||
|
?.takeIf { acc.readableBytes() >= it }
|
||||||
|
?.let { mSize ->
|
||||||
|
val metadata = ObjectInputStream(ByteBufInputStream(acc)).use {
|
||||||
|
acc.retain()
|
||||||
|
it.readObject() as CacheValueMetadata
|
||||||
|
}
|
||||||
|
ctx.writeAndFlush(CacheValueFoundResponse(key, metadata))
|
||||||
|
responseSent = true
|
||||||
|
acc.readerIndex(Int.SIZE_BYTES + mSize)
|
||||||
|
}
|
||||||
|
if (responseSent) {
|
||||||
|
acc.readBytes(outputStream, acc.readableBytes())
|
||||||
|
if(acc.readableBytes() >= chunkSize) {
|
||||||
|
flush(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun flush(last : Boolean) {
|
||||||
|
val toSend = extractChunk(chunk, ctx.alloc())
|
||||||
|
val msg = if(last) {
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Sending last chunk to client on channel ${ctx.channel().id().asShortText()}"
|
||||||
|
}
|
||||||
|
LastCacheContent(toSend)
|
||||||
|
} else {
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Sending chunk to client on channel ${ctx.channel().id().asShortText()}"
|
||||||
|
}
|
||||||
|
CacheContent(toSend)
|
||||||
|
}
|
||||||
|
ctx.writeAndFlush(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
fun commit() {
|
||||||
|
acc.release()
|
||||||
|
chunk.retain()
|
||||||
|
outputStream.close()
|
||||||
|
flush(true)
|
||||||
|
chunk.release()
|
||||||
|
}
|
||||||
|
|
||||||
|
fun rollback() {
|
||||||
|
acc.release()
|
||||||
|
outputStream.close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private inner class InProgressPutRequest(
|
||||||
|
private val ch : NettyChannel,
|
||||||
|
metadata : CacheValueMetadata,
|
||||||
|
val digest : ByteBuf,
|
||||||
|
val requestController: CompletableFuture<MemcacheRequestController>,
|
||||||
|
private val alloc: ByteBufAllocator
|
||||||
|
) {
|
||||||
|
private var totalSize = 0
|
||||||
|
private var tmpFile : FileChannel? = null
|
||||||
|
private val accumulator = alloc.compositeBuffer()
|
||||||
|
private val stream = ByteBufOutputStream(accumulator).let {
|
||||||
|
if (compressionEnabled) {
|
||||||
|
DeflaterOutputStream(it, Deflater(compressionLevel))
|
||||||
|
} else {
|
||||||
|
it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
init {
|
||||||
|
ByteArrayOutputStream().let { baos ->
|
||||||
|
ObjectOutputStream(baos).use {
|
||||||
|
it.writeObject(metadata)
|
||||||
|
}
|
||||||
|
val serializedBytes = baos.toByteArray()
|
||||||
|
accumulator.writeInt(serializedBytes.size)
|
||||||
|
accumulator.writeBytes(serializedBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fun write(buf: ByteBuf) {
|
||||||
|
totalSize += buf.readableBytes()
|
||||||
|
buf.readBytes(stream, buf.readableBytes())
|
||||||
|
tmpFile?.let {
|
||||||
|
flushToDisk(it, accumulator)
|
||||||
|
}
|
||||||
|
if(accumulator.readableBytes() > 0x100000) {
|
||||||
|
log.debug(ch) {
|
||||||
|
"Entry is too big, buffering it into a file"
|
||||||
|
}
|
||||||
|
val opts = arrayOf(
|
||||||
|
StandardOpenOption.DELETE_ON_CLOSE,
|
||||||
|
StandardOpenOption.READ,
|
||||||
|
StandardOpenOption.WRITE,
|
||||||
|
StandardOpenOption.TRUNCATE_EXISTING
|
||||||
|
)
|
||||||
|
FileChannel.open(Files.createTempFile("rbcs-memcache", ".tmp"), *opts).let { fc ->
|
||||||
|
tmpFile = fc
|
||||||
|
flushToDisk(fc, accumulator)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun flushToDisk(fc : FileChannel, buf : CompositeByteBuf) {
|
||||||
|
val chunk = extractChunk(buf, alloc)
|
||||||
|
fc.write(chunk.nioBuffer())
|
||||||
|
chunk.release()
|
||||||
|
}
|
||||||
|
|
||||||
|
fun commit() : Pair<Int, ReadableByteChannel> {
|
||||||
|
digest.release()
|
||||||
|
accumulator.retain()
|
||||||
|
stream.close()
|
||||||
|
val fileChannel = tmpFile
|
||||||
|
return if(fileChannel != null) {
|
||||||
|
flushToDisk(fileChannel, accumulator)
|
||||||
|
accumulator.release()
|
||||||
|
fileChannel.position(0)
|
||||||
|
val fileSize = fileChannel.size().toIntOrNull() ?: let {
|
||||||
|
fileChannel.close()
|
||||||
|
throw ContentTooLargeException("Request body is too large", null)
|
||||||
|
}
|
||||||
|
fileSize to fileChannel
|
||||||
|
} else {
|
||||||
|
accumulator.readableBytes() to Channels.newChannel(ByteBufInputStream(accumulator))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fun rollback() {
|
||||||
|
stream.close()
|
||||||
|
digest.release()
|
||||||
|
tmpFile?.close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private var inProgressPutRequest: InProgressPutRequest? = null
|
||||||
|
private var inProgressGetRequest: InProgressGetRequest? = null
|
||||||
|
|
||||||
|
override fun channelRead0(ctx: ChannelHandlerContext, msg: CacheMessage) {
|
||||||
|
when (msg) {
|
||||||
|
is CacheGetRequest -> handleGetRequest(ctx, msg)
|
||||||
|
is CachePutRequest -> handlePutRequest(ctx, msg)
|
||||||
|
is LastCacheContent -> handleLastCacheContent(ctx, msg)
|
||||||
|
is CacheContent -> handleCacheContent(ctx, msg)
|
||||||
|
else -> ctx.fireChannelRead(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleGetRequest(ctx: ChannelHandlerContext, msg: CacheGetRequest) {
|
||||||
|
log.debug(ctx) {
|
||||||
|
"Fetching ${msg.key} from memcache"
|
||||||
|
}
|
||||||
|
val key = ctx.alloc().buffer().also {
|
||||||
|
it.writeBytes(processCacheKey(msg.key, digestAlgorithm))
|
||||||
|
}
|
||||||
|
val responseHandler = object : MemcacheResponseHandler {
|
||||||
|
override fun responseReceived(response: BinaryMemcacheResponse) {
|
||||||
|
val status = response.status()
|
||||||
|
when (status) {
|
||||||
|
BinaryMemcacheResponseStatus.SUCCESS -> {
|
||||||
|
log.debug(ctx) {
|
||||||
|
"Cache hit for key ${msg.key} on memcache"
|
||||||
|
}
|
||||||
|
inProgressGetRequest = InProgressGetRequest(msg.key, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
BinaryMemcacheResponseStatus.KEY_ENOENT -> {
|
||||||
|
log.debug(ctx) {
|
||||||
|
"Cache miss for key ${msg.key} on memcache"
|
||||||
|
}
|
||||||
|
ctx.writeAndFlush(CacheValueNotFoundResponse())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun contentReceived(content: MemcacheContent) {
|
||||||
|
log.trace(ctx) {
|
||||||
|
"${if(content is LastMemcacheContent) "Last chunk" else "Chunk"} of ${content.content().readableBytes()} bytes received from memcache for key ${msg.key}"
|
||||||
|
}
|
||||||
|
inProgressGetRequest?.write(content.content())
|
||||||
|
if (content is LastMemcacheContent) {
|
||||||
|
inProgressGetRequest?.commit()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ex: Throwable) {
|
||||||
|
inProgressGetRequest?.let {
|
||||||
|
inProgressGetRequest = null
|
||||||
|
it.rollback()
|
||||||
|
}
|
||||||
|
this@MemcacheCacheHandler.exceptionCaught(ctx, ex)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
client.sendRequest(key.retainedDuplicate(), responseHandler).thenAccept { requestHandle ->
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Sending GET request for key ${msg.key} to memcache"
|
||||||
|
}
|
||||||
|
val request = DefaultBinaryMemcacheRequest(key).apply {
|
||||||
|
setOpcode(BinaryMemcacheOpcodes.GET)
|
||||||
|
}
|
||||||
|
requestHandle.sendRequest(request)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handlePutRequest(ctx: ChannelHandlerContext, msg: CachePutRequest) {
|
||||||
|
val key = ctx.alloc().buffer().also {
|
||||||
|
it.writeBytes(processCacheKey(msg.key, digestAlgorithm))
|
||||||
|
}
|
||||||
|
val responseHandler = object : MemcacheResponseHandler {
|
||||||
|
override fun responseReceived(response: BinaryMemcacheResponse) {
|
||||||
|
val status = response.status()
|
||||||
|
when (status) {
|
||||||
|
BinaryMemcacheResponseStatus.SUCCESS -> {
|
||||||
|
log.debug(ctx) {
|
||||||
|
"Inserted key ${msg.key} into memcache"
|
||||||
|
}
|
||||||
|
ctx.writeAndFlush(CachePutResponse(msg.key))
|
||||||
|
}
|
||||||
|
else -> this@MemcacheCacheHandler.exceptionCaught(ctx, MemcacheException(status))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun contentReceived(content: MemcacheContent) {}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ex: Throwable) {
|
||||||
|
this@MemcacheCacheHandler.exceptionCaught(ctx, ex)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
val requestController = client.sendRequest(key.retainedDuplicate(), responseHandler).whenComplete { _, ex ->
|
||||||
|
ex?.let {
|
||||||
|
this@MemcacheCacheHandler.exceptionCaught(ctx, ex)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
inProgressPutRequest = InProgressPutRequest(ctx.channel(), msg.metadata, key, requestController, ctx.alloc())
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleCacheContent(ctx: ChannelHandlerContext, msg: CacheContent) {
|
||||||
|
inProgressPutRequest?.let { request ->
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Received chunk of ${msg.content().readableBytes()} bytes for memcache"
|
||||||
|
}
|
||||||
|
request.write(msg.content())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleLastCacheContent(ctx: ChannelHandlerContext, msg: LastCacheContent) {
|
||||||
|
inProgressPutRequest?.let { request ->
|
||||||
|
inProgressPutRequest = null
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Received last chunk of ${msg.content().readableBytes()} bytes for memcache"
|
||||||
|
}
|
||||||
|
request.write(msg.content())
|
||||||
|
val key = request.digest.retainedDuplicate()
|
||||||
|
val (payloadSize, payloadSource) = request.commit()
|
||||||
|
val extras = ctx.alloc().buffer(8, 8)
|
||||||
|
extras.writeInt(0)
|
||||||
|
extras.writeInt(encodeExpiry(maxAge))
|
||||||
|
val totalBodyLength = request.digest.readableBytes() + extras.readableBytes() + payloadSize
|
||||||
|
request.requestController.whenComplete { requestController, ex ->
|
||||||
|
if(ex == null) {
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Sending SET request to memcache"
|
||||||
|
}
|
||||||
|
requestController.sendRequest(DefaultBinaryMemcacheRequest().apply {
|
||||||
|
setOpcode(BinaryMemcacheOpcodes.SET)
|
||||||
|
setKey(key)
|
||||||
|
setExtras(extras)
|
||||||
|
setTotalBodyLength(totalBodyLength)
|
||||||
|
})
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Sending request payload to memcache"
|
||||||
|
}
|
||||||
|
payloadSource.use { source ->
|
||||||
|
val bb = ByteBuffer.allocate(chunkSize)
|
||||||
|
while (true) {
|
||||||
|
val read = source.read(bb)
|
||||||
|
bb.limit()
|
||||||
|
if(read >= 0 && bb.position() < chunkSize && bb.hasRemaining()) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
val chunk = ctx.alloc().buffer(chunkSize)
|
||||||
|
bb.flip()
|
||||||
|
chunk.writeBytes(bb)
|
||||||
|
bb.clear()
|
||||||
|
log.trace(ctx) {
|
||||||
|
"Sending ${chunk.readableBytes()} bytes chunk to memcache"
|
||||||
|
}
|
||||||
|
if(read < 0) {
|
||||||
|
requestController.sendContent(DefaultLastMemcacheContent(chunk))
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
requestController.sendContent(DefaultMemcacheContent(chunk))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
payloadSource.close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
||||||
|
inProgressGetRequest?.let {
|
||||||
|
inProgressGetRequest = null
|
||||||
|
it.rollback()
|
||||||
|
}
|
||||||
|
inProgressPutRequest?.let {
|
||||||
|
inProgressPutRequest = null
|
||||||
|
it.requestController.thenAccept { controller ->
|
||||||
|
controller.exceptionCaught(cause)
|
||||||
|
}
|
||||||
|
it.rollback()
|
||||||
|
}
|
||||||
|
super.exceptionCaught(ctx, cause)
|
||||||
|
}
|
||||||
|
}
|
@@ -2,8 +2,8 @@ package net.woggioni.rbcs.server.memcache
|
|||||||
|
|
||||||
import net.woggioni.rbcs.api.CacheProvider
|
import net.woggioni.rbcs.api.CacheProvider
|
||||||
import net.woggioni.rbcs.api.exception.ConfigurationException
|
import net.woggioni.rbcs.api.exception.ConfigurationException
|
||||||
import net.woggioni.rbcs.common.RBCS
|
|
||||||
import net.woggioni.rbcs.common.HostAndPort
|
import net.woggioni.rbcs.common.HostAndPort
|
||||||
|
import net.woggioni.rbcs.common.RBCS
|
||||||
import net.woggioni.rbcs.common.Xml
|
import net.woggioni.rbcs.common.Xml
|
||||||
import net.woggioni.rbcs.common.Xml.Companion.asIterable
|
import net.woggioni.rbcs.common.Xml.Companion.asIterable
|
||||||
import net.woggioni.rbcs.common.Xml.Companion.renderAttribute
|
import net.woggioni.rbcs.common.Xml.Companion.renderAttribute
|
||||||
@@ -28,18 +28,19 @@ class MemcacheCacheProvider : CacheProvider<MemcacheCacheConfiguration> {
|
|||||||
val maxAge = el.renderAttribute("max-age")
|
val maxAge = el.renderAttribute("max-age")
|
||||||
?.let(Duration::parse)
|
?.let(Duration::parse)
|
||||||
?: Duration.ofDays(1)
|
?: Duration.ofDays(1)
|
||||||
val maxSize = el.renderAttribute("max-size")
|
val chunkSize = el.renderAttribute("chunk-size")
|
||||||
?.let(String::toInt)
|
?.let(Integer::decode)
|
||||||
?: 0x100000
|
?: 0x10000
|
||||||
|
val compressionLevel = el.renderAttribute("compression-level")
|
||||||
|
?.let(Integer::decode)
|
||||||
|
?: -1
|
||||||
val compressionMode = el.renderAttribute("compression-mode")
|
val compressionMode = el.renderAttribute("compression-mode")
|
||||||
?.let {
|
?.let {
|
||||||
when (it) {
|
when (it) {
|
||||||
"gzip" -> MemcacheCacheConfiguration.CompressionMode.GZIP
|
|
||||||
"deflate" -> MemcacheCacheConfiguration.CompressionMode.DEFLATE
|
"deflate" -> MemcacheCacheConfiguration.CompressionMode.DEFLATE
|
||||||
else -> MemcacheCacheConfiguration.CompressionMode.DEFLATE
|
else -> MemcacheCacheConfiguration.CompressionMode.DEFLATE
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
?: MemcacheCacheConfiguration.CompressionMode.DEFLATE
|
|
||||||
val digestAlgorithm = el.renderAttribute("digest")
|
val digestAlgorithm = el.renderAttribute("digest")
|
||||||
for (child in el.asIterable()) {
|
for (child in el.asIterable()) {
|
||||||
when (child.nodeName) {
|
when (child.nodeName) {
|
||||||
@@ -60,9 +61,10 @@ class MemcacheCacheProvider : CacheProvider<MemcacheCacheConfiguration> {
|
|||||||
return MemcacheCacheConfiguration(
|
return MemcacheCacheConfiguration(
|
||||||
servers,
|
servers,
|
||||||
maxAge,
|
maxAge,
|
||||||
maxSize,
|
|
||||||
digestAlgorithm,
|
digestAlgorithm,
|
||||||
compressionMode,
|
compressionMode,
|
||||||
|
compressionLevel,
|
||||||
|
chunkSize
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,7 +72,6 @@ class MemcacheCacheProvider : CacheProvider<MemcacheCacheConfiguration> {
|
|||||||
val result = doc.createElement("cache")
|
val result = doc.createElement("cache")
|
||||||
Xml.of(doc, result) {
|
Xml.of(doc, result) {
|
||||||
attr("xmlns:${xmlNamespacePrefix}", xmlNamespace, namespaceURI = "http://www.w3.org/2000/xmlns/")
|
attr("xmlns:${xmlNamespacePrefix}", xmlNamespace, namespaceURI = "http://www.w3.org/2000/xmlns/")
|
||||||
|
|
||||||
attr("xs:type", "${xmlNamespacePrefix}:$xmlType", RBCS.XML_SCHEMA_NAMESPACE_URI)
|
attr("xs:type", "${xmlNamespacePrefix}:$xmlType", RBCS.XML_SCHEMA_NAMESPACE_URI)
|
||||||
for (server in servers) {
|
for (server in servers) {
|
||||||
node("server") {
|
node("server") {
|
||||||
@@ -83,18 +84,18 @@ class MemcacheCacheProvider : CacheProvider<MemcacheCacheConfiguration> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
attr("max-age", maxAge.toString())
|
attr("max-age", maxAge.toString())
|
||||||
attr("max-size", maxSize.toString())
|
attr("chunk-size", chunkSize.toString())
|
||||||
digestAlgorithm?.let { digestAlgorithm ->
|
digestAlgorithm?.let { digestAlgorithm ->
|
||||||
attr("digest", digestAlgorithm)
|
attr("digest", digestAlgorithm)
|
||||||
}
|
}
|
||||||
compressionMode?.let { compressionMode ->
|
compressionMode?.let { compressionMode ->
|
||||||
attr(
|
attr(
|
||||||
"compression-mode", when (compressionMode) {
|
"compression-mode", when (compressionMode) {
|
||||||
MemcacheCacheConfiguration.CompressionMode.GZIP -> "gzip"
|
|
||||||
MemcacheCacheConfiguration.CompressionMode.DEFLATE -> "deflate"
|
MemcacheCacheConfiguration.CompressionMode.DEFLATE -> "deflate"
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
attr("compression-level", compressionLevel.toString())
|
||||||
}
|
}
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
@@ -3,8 +3,8 @@ package net.woggioni.rbcs.server.memcache.client
|
|||||||
|
|
||||||
import io.netty.bootstrap.Bootstrap
|
import io.netty.bootstrap.Bootstrap
|
||||||
import io.netty.buffer.ByteBuf
|
import io.netty.buffer.ByteBuf
|
||||||
import io.netty.buffer.Unpooled
|
|
||||||
import io.netty.channel.Channel
|
import io.netty.channel.Channel
|
||||||
|
import io.netty.channel.ChannelFutureListener
|
||||||
import io.netty.channel.ChannelHandlerContext
|
import io.netty.channel.ChannelHandlerContext
|
||||||
import io.netty.channel.ChannelOption
|
import io.netty.channel.ChannelOption
|
||||||
import io.netty.channel.ChannelPipeline
|
import io.netty.channel.ChannelPipeline
|
||||||
@@ -14,44 +14,29 @@ import io.netty.channel.pool.AbstractChannelPoolHandler
|
|||||||
import io.netty.channel.pool.ChannelPool
|
import io.netty.channel.pool.ChannelPool
|
||||||
import io.netty.channel.pool.FixedChannelPool
|
import io.netty.channel.pool.FixedChannelPool
|
||||||
import io.netty.channel.socket.nio.NioSocketChannel
|
import io.netty.channel.socket.nio.NioSocketChannel
|
||||||
import io.netty.handler.codec.DecoderException
|
import io.netty.handler.codec.memcache.LastMemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.MemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.MemcacheObject
|
||||||
import io.netty.handler.codec.memcache.binary.BinaryMemcacheClientCodec
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheClientCodec
|
||||||
import io.netty.handler.codec.memcache.binary.BinaryMemcacheObjectAggregator
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheRequest
|
||||||
import io.netty.handler.codec.memcache.binary.BinaryMemcacheOpcodes
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheResponse
|
||||||
import io.netty.handler.codec.memcache.binary.BinaryMemcacheResponseStatus
|
|
||||||
import io.netty.handler.codec.memcache.binary.DefaultFullBinaryMemcacheRequest
|
|
||||||
import io.netty.handler.codec.memcache.binary.FullBinaryMemcacheRequest
|
|
||||||
import io.netty.handler.codec.memcache.binary.FullBinaryMemcacheResponse
|
|
||||||
import io.netty.util.concurrent.GenericFutureListener
|
import io.netty.util.concurrent.GenericFutureListener
|
||||||
import net.woggioni.rbcs.common.ByteBufInputStream
|
|
||||||
import net.woggioni.rbcs.common.ByteBufOutputStream
|
|
||||||
import net.woggioni.rbcs.common.RBCS.digest
|
|
||||||
import net.woggioni.rbcs.common.HostAndPort
|
import net.woggioni.rbcs.common.HostAndPort
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
|
import net.woggioni.rbcs.common.warn
|
||||||
import net.woggioni.rbcs.server.memcache.MemcacheCacheConfiguration
|
import net.woggioni.rbcs.server.memcache.MemcacheCacheConfiguration
|
||||||
import net.woggioni.rbcs.server.memcache.MemcacheException
|
import net.woggioni.rbcs.server.memcache.MemcacheCacheHandler
|
||||||
import net.woggioni.jwo.JWO
|
import java.io.IOException
|
||||||
import java.net.InetSocketAddress
|
import java.net.InetSocketAddress
|
||||||
import java.nio.channels.Channels
|
|
||||||
import java.nio.channels.ReadableByteChannel
|
|
||||||
import java.security.MessageDigest
|
|
||||||
import java.time.Duration
|
|
||||||
import java.time.Instant
|
|
||||||
import java.util.concurrent.CompletableFuture
|
import java.util.concurrent.CompletableFuture
|
||||||
import java.util.concurrent.ConcurrentHashMap
|
import java.util.concurrent.ConcurrentHashMap
|
||||||
import java.util.zip.Deflater
|
|
||||||
import java.util.zip.DeflaterOutputStream
|
|
||||||
import java.util.zip.GZIPInputStream
|
|
||||||
import java.util.zip.GZIPOutputStream
|
|
||||||
import java.util.zip.InflaterInputStream
|
|
||||||
import io.netty.util.concurrent.Future as NettyFuture
|
import io.netty.util.concurrent.Future as NettyFuture
|
||||||
|
|
||||||
|
|
||||||
class MemcacheClient(private val cfg: MemcacheCacheConfiguration) : AutoCloseable {
|
class MemcacheClient(private val servers: List<MemcacheCacheConfiguration.Server>, private val chunkSize : Int) : AutoCloseable {
|
||||||
|
|
||||||
private companion object {
|
private companion object {
|
||||||
@JvmStatic
|
private val log = createLogger<MemcacheCacheHandler>()
|
||||||
private val log = contextLogger()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private val group: NioEventLoopGroup
|
private val group: NioEventLoopGroup
|
||||||
@@ -75,35 +60,33 @@ class MemcacheClient(private val cfg: MemcacheCacheConfiguration) : AutoCloseabl
|
|||||||
|
|
||||||
override fun channelCreated(ch: Channel) {
|
override fun channelCreated(ch: Channel) {
|
||||||
val pipeline: ChannelPipeline = ch.pipeline()
|
val pipeline: ChannelPipeline = ch.pipeline()
|
||||||
pipeline.addLast(BinaryMemcacheClientCodec())
|
pipeline.addLast(BinaryMemcacheClientCodec(chunkSize, true))
|
||||||
pipeline.addLast(BinaryMemcacheObjectAggregator(Integer.MAX_VALUE))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return FixedChannelPool(bootstrap, channelPoolHandler, server.maxConnections)
|
return FixedChannelPool(bootstrap, channelPoolHandler, server.maxConnections)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fun sendRequest(
|
||||||
private fun sendRequest(request: FullBinaryMemcacheRequest): CompletableFuture<FullBinaryMemcacheResponse> {
|
key: ByteBuf,
|
||||||
|
responseHandler: MemcacheResponseHandler
|
||||||
val server = cfg.servers.let { servers ->
|
): CompletableFuture<MemcacheRequestController> {
|
||||||
if (servers.size > 1) {
|
val server = if (servers.size > 1) {
|
||||||
val key = request.key().duplicate()
|
var checksum = 0
|
||||||
var checksum = 0
|
while (key.readableBytes() > 4) {
|
||||||
while (key.readableBytes() > 4) {
|
val byte = key.readInt()
|
||||||
val byte = key.readInt()
|
checksum = checksum xor byte
|
||||||
checksum = checksum xor byte
|
|
||||||
}
|
|
||||||
while (key.readableBytes() > 0) {
|
|
||||||
val byte = key.readByte()
|
|
||||||
checksum = checksum xor byte.toInt()
|
|
||||||
}
|
|
||||||
servers[checksum % servers.size]
|
|
||||||
} else {
|
|
||||||
servers.first()
|
|
||||||
}
|
}
|
||||||
|
while (key.readableBytes() > 0) {
|
||||||
|
val byte = key.readByte()
|
||||||
|
checksum = checksum xor byte.toInt()
|
||||||
|
}
|
||||||
|
servers[checksum % servers.size]
|
||||||
|
} else {
|
||||||
|
servers.first()
|
||||||
}
|
}
|
||||||
|
key.release()
|
||||||
|
|
||||||
val response = CompletableFuture<FullBinaryMemcacheResponse>()
|
val response = CompletableFuture<MemcacheRequestController>()
|
||||||
// Custom handler for processing responses
|
// Custom handler for processing responses
|
||||||
val pool = connectionPool.computeIfAbsent(server.endpoint) {
|
val pool = connectionPool.computeIfAbsent(server.endpoint) {
|
||||||
newConnectionPool(server)
|
newConnectionPool(server)
|
||||||
@@ -111,33 +94,108 @@ class MemcacheClient(private val cfg: MemcacheCacheConfiguration) : AutoCloseabl
|
|||||||
pool.acquire().addListener(object : GenericFutureListener<NettyFuture<Channel>> {
|
pool.acquire().addListener(object : GenericFutureListener<NettyFuture<Channel>> {
|
||||||
override fun operationComplete(channelFuture: NettyFuture<Channel>) {
|
override fun operationComplete(channelFuture: NettyFuture<Channel>) {
|
||||||
if (channelFuture.isSuccess) {
|
if (channelFuture.isSuccess) {
|
||||||
val channel = channelFuture.now
|
|
||||||
val pipeline = channel.pipeline()
|
|
||||||
channel.pipeline()
|
|
||||||
.addLast("client-handler", object : SimpleChannelInboundHandler<FullBinaryMemcacheResponse>() {
|
|
||||||
override fun channelRead0(
|
|
||||||
ctx: ChannelHandlerContext,
|
|
||||||
msg: FullBinaryMemcacheResponse
|
|
||||||
) {
|
|
||||||
pipeline.removeLast()
|
|
||||||
pool.release(channel)
|
|
||||||
msg.touch("The method's caller must remember to release this")
|
|
||||||
response.complete(msg.retain())
|
|
||||||
}
|
|
||||||
|
|
||||||
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
var requestSent = false
|
||||||
val ex = when (cause) {
|
var requestBodySent = false
|
||||||
is DecoderException -> cause.cause!!
|
var requestFinished = false
|
||||||
else -> cause
|
var responseReceived = false
|
||||||
}
|
var responseBodyReceived = false
|
||||||
ctx.close()
|
var responseFinished = false
|
||||||
pipeline.removeLast()
|
var requestBodySize = 0
|
||||||
pool.release(channel)
|
var requestBodyBytesSent = 0
|
||||||
response.completeExceptionally(ex)
|
|
||||||
|
|
||||||
|
|
||||||
|
val channel = channelFuture.now
|
||||||
|
var connectionClosedByTheRemoteServer = true
|
||||||
|
val closeCallback = {
|
||||||
|
if (connectionClosedByTheRemoteServer) {
|
||||||
|
val ex = IOException("The memcache server closed the connection")
|
||||||
|
val completed = response.completeExceptionally(ex)
|
||||||
|
if(!completed) responseHandler.exceptionCaught(ex)
|
||||||
|
log.warn {
|
||||||
|
"RequestSent: $requestSent, RequestBodySent: $requestBodySent, " +
|
||||||
|
"RequestFinished: $requestFinished, ResponseReceived: $responseReceived, " +
|
||||||
|
"ResponseBodyReceived: $responseBodyReceived, ResponseFinished: $responseFinished, " +
|
||||||
|
"RequestBodySize: $requestBodySize, RequestBodyBytesSent: $requestBodyBytesSent"
|
||||||
}
|
}
|
||||||
})
|
}
|
||||||
request.touch()
|
pool.release(channel)
|
||||||
channel.writeAndFlush(request)
|
}
|
||||||
|
val closeListener = ChannelFutureListener {
|
||||||
|
closeCallback()
|
||||||
|
}
|
||||||
|
channel.closeFuture().addListener(closeListener)
|
||||||
|
val pipeline = channel.pipeline()
|
||||||
|
val handler = object : SimpleChannelInboundHandler<MemcacheObject>() {
|
||||||
|
|
||||||
|
override fun handlerAdded(ctx: ChannelHandlerContext) {
|
||||||
|
channel.closeFuture().removeListener(closeListener)
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun channelRead0(
|
||||||
|
ctx: ChannelHandlerContext,
|
||||||
|
msg: MemcacheObject
|
||||||
|
) {
|
||||||
|
when (msg) {
|
||||||
|
is BinaryMemcacheResponse -> {
|
||||||
|
responseHandler.responseReceived(msg)
|
||||||
|
responseReceived = true
|
||||||
|
}
|
||||||
|
|
||||||
|
is LastMemcacheContent -> {
|
||||||
|
responseFinished = true
|
||||||
|
responseHandler.contentReceived(msg)
|
||||||
|
pipeline.remove(this)
|
||||||
|
pool.release(channel)
|
||||||
|
}
|
||||||
|
|
||||||
|
is MemcacheContent -> {
|
||||||
|
responseBodyReceived = true
|
||||||
|
responseHandler.contentReceived(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun channelInactive(ctx: ChannelHandlerContext) {
|
||||||
|
closeCallback()
|
||||||
|
ctx.fireChannelInactive()
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
||||||
|
connectionClosedByTheRemoteServer = false
|
||||||
|
ctx.close()
|
||||||
|
pool.release(channel)
|
||||||
|
responseHandler.exceptionCaught(cause)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
channel.pipeline()
|
||||||
|
.addLast("client-handler", handler)
|
||||||
|
response.complete(object : MemcacheRequestController {
|
||||||
|
|
||||||
|
override fun sendRequest(request: BinaryMemcacheRequest) {
|
||||||
|
requestBodySize = request.totalBodyLength() - request.keyLength() - request.extrasLength()
|
||||||
|
channel.writeAndFlush(request)
|
||||||
|
requestSent = true
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun sendContent(content: MemcacheContent) {
|
||||||
|
val size = content.content().readableBytes()
|
||||||
|
channel.writeAndFlush(content).addListener {
|
||||||
|
requestBodyBytesSent += size
|
||||||
|
requestBodySent = true
|
||||||
|
if(content is LastMemcacheContent) {
|
||||||
|
requestFinished = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ex: Throwable) {
|
||||||
|
connectionClosedByTheRemoteServer = false
|
||||||
|
channel.close()
|
||||||
|
}
|
||||||
|
})
|
||||||
} else {
|
} else {
|
||||||
response.completeExceptionally(channelFuture.cause())
|
response.completeExceptionally(channelFuture.cause())
|
||||||
}
|
}
|
||||||
@@ -146,107 +204,6 @@ class MemcacheClient(private val cfg: MemcacheCacheConfiguration) : AutoCloseabl
|
|||||||
return response
|
return response
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun encodeExpiry(expiry: Duration): Int {
|
|
||||||
val expirySeconds = expiry.toSeconds()
|
|
||||||
return expirySeconds.toInt().takeIf { it.toLong() == expirySeconds }
|
|
||||||
?: Instant.ofEpochSecond(expirySeconds).epochSecond.toInt()
|
|
||||||
}
|
|
||||||
|
|
||||||
fun get(key: String): CompletableFuture<ReadableByteChannel?> {
|
|
||||||
val request = (cfg.digestAlgorithm
|
|
||||||
?.let(MessageDigest::getInstance)
|
|
||||||
?.let { md ->
|
|
||||||
digest(key.toByteArray(), md)
|
|
||||||
} ?: key.toByteArray(Charsets.UTF_8)).let { digest ->
|
|
||||||
DefaultFullBinaryMemcacheRequest(Unpooled.wrappedBuffer(digest), null).apply {
|
|
||||||
setOpcode(BinaryMemcacheOpcodes.GET)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sendRequest(request).thenApply { response ->
|
|
||||||
try {
|
|
||||||
when (val status = response.status()) {
|
|
||||||
BinaryMemcacheResponseStatus.SUCCESS -> {
|
|
||||||
val compressionMode = cfg.compressionMode
|
|
||||||
val content = response.content().retain()
|
|
||||||
content.touch()
|
|
||||||
if (compressionMode != null) {
|
|
||||||
when (compressionMode) {
|
|
||||||
MemcacheCacheConfiguration.CompressionMode.GZIP -> {
|
|
||||||
GZIPInputStream(ByteBufInputStream(content))
|
|
||||||
}
|
|
||||||
|
|
||||||
MemcacheCacheConfiguration.CompressionMode.DEFLATE -> {
|
|
||||||
InflaterInputStream(ByteBufInputStream(content))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ByteBufInputStream(content)
|
|
||||||
}.let(Channels::newChannel)
|
|
||||||
}
|
|
||||||
|
|
||||||
BinaryMemcacheResponseStatus.KEY_ENOENT -> {
|
|
||||||
null
|
|
||||||
}
|
|
||||||
|
|
||||||
else -> throw MemcacheException(status)
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
response.release()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fun put(key: String, content: ByteBuf, expiry: Duration, cas: Long? = null): CompletableFuture<Void> {
|
|
||||||
val request = (cfg.digestAlgorithm
|
|
||||||
?.let(MessageDigest::getInstance)
|
|
||||||
?.let { md ->
|
|
||||||
digest(key.toByteArray(), md)
|
|
||||||
} ?: key.toByteArray(Charsets.UTF_8)).let { digest ->
|
|
||||||
val extras = Unpooled.buffer(8, 8)
|
|
||||||
extras.writeInt(0)
|
|
||||||
extras.writeInt(encodeExpiry(expiry))
|
|
||||||
val compressionMode = cfg.compressionMode
|
|
||||||
content.retain()
|
|
||||||
val payload = if (compressionMode != null) {
|
|
||||||
val inputStream = ByteBufInputStream(content)
|
|
||||||
val buf = content.alloc().buffer()
|
|
||||||
buf.retain()
|
|
||||||
val outputStream = when (compressionMode) {
|
|
||||||
MemcacheCacheConfiguration.CompressionMode.GZIP -> {
|
|
||||||
GZIPOutputStream(ByteBufOutputStream(buf))
|
|
||||||
}
|
|
||||||
|
|
||||||
MemcacheCacheConfiguration.CompressionMode.DEFLATE -> {
|
|
||||||
DeflaterOutputStream(ByteBufOutputStream(buf), Deflater(Deflater.DEFAULT_COMPRESSION, false))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
inputStream.use { i ->
|
|
||||||
outputStream.use { o ->
|
|
||||||
JWO.copy(i, o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf
|
|
||||||
} else {
|
|
||||||
content
|
|
||||||
}
|
|
||||||
DefaultFullBinaryMemcacheRequest(Unpooled.wrappedBuffer(digest), extras, payload).apply {
|
|
||||||
setOpcode(BinaryMemcacheOpcodes.SET)
|
|
||||||
cas?.let(this::setCas)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sendRequest(request).thenApply { response ->
|
|
||||||
try {
|
|
||||||
when (val status = response.status()) {
|
|
||||||
BinaryMemcacheResponseStatus.SUCCESS -> null
|
|
||||||
else -> throw MemcacheException(status)
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
response.release()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
fun shutDown(): NettyFuture<*> {
|
fun shutDown(): NettyFuture<*> {
|
||||||
return group.shutdownGracefully()
|
return group.shutdownGracefully()
|
||||||
}
|
}
|
||||||
|
@@ -0,0 +1,13 @@
|
|||||||
|
package net.woggioni.rbcs.server.memcache.client
|
||||||
|
|
||||||
|
import io.netty.handler.codec.memcache.MemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheRequest
|
||||||
|
|
||||||
|
interface MemcacheRequestController {
|
||||||
|
|
||||||
|
fun sendRequest(request : BinaryMemcacheRequest)
|
||||||
|
|
||||||
|
fun sendContent(content : MemcacheContent)
|
||||||
|
|
||||||
|
fun exceptionCaught(ex : Throwable)
|
||||||
|
}
|
@@ -0,0 +1,14 @@
|
|||||||
|
package net.woggioni.rbcs.server.memcache.client
|
||||||
|
|
||||||
|
import io.netty.handler.codec.memcache.MemcacheContent
|
||||||
|
import io.netty.handler.codec.memcache.binary.BinaryMemcacheResponse
|
||||||
|
|
||||||
|
interface MemcacheResponseHandler {
|
||||||
|
|
||||||
|
|
||||||
|
fun responseReceived(response : BinaryMemcacheResponse)
|
||||||
|
|
||||||
|
fun contentReceived(content : MemcacheContent)
|
||||||
|
|
||||||
|
fun exceptionCaught(ex : Throwable)
|
||||||
|
}
|
@@ -20,9 +20,10 @@
|
|||||||
<xs:element name="server" type="rbcs-memcache:memcacheServerType"/>
|
<xs:element name="server" type="rbcs-memcache:memcacheServerType"/>
|
||||||
</xs:sequence>
|
</xs:sequence>
|
||||||
<xs:attribute name="max-age" type="xs:duration" default="P1D"/>
|
<xs:attribute name="max-age" type="xs:duration" default="P1D"/>
|
||||||
<xs:attribute name="max-size" type="xs:unsignedInt" default="1048576"/>
|
<xs:attribute name="chunk-size" type="rbcs:byteSizeType" default="0x10000"/>
|
||||||
<xs:attribute name="digest" type="xs:token" />
|
<xs:attribute name="digest" type="xs:token" />
|
||||||
<xs:attribute name="compression-mode" type="rbcs-memcache:compressionType"/>
|
<xs:attribute name="compression-mode" type="rbcs-memcache:compressionType"/>
|
||||||
|
<xs:attribute name="compression-level" type="rbcs:compressionLevelType" default="-1"/>
|
||||||
</xs:extension>
|
</xs:extension>
|
||||||
</xs:complexContent>
|
</xs:complexContent>
|
||||||
</xs:complexType>
|
</xs:complexType>
|
||||||
@@ -30,7 +31,6 @@
|
|||||||
<xs:simpleType name="compressionType">
|
<xs:simpleType name="compressionType">
|
||||||
<xs:restriction base="xs:token">
|
<xs:restriction base="xs:token">
|
||||||
<xs:enumeration value="deflate"/>
|
<xs:enumeration value="deflate"/>
|
||||||
<xs:enumeration value="gzip"/>
|
|
||||||
</xs:restriction>
|
</xs:restriction>
|
||||||
</xs:simpleType>
|
</xs:simpleType>
|
||||||
|
|
||||||
|
@@ -0,0 +1,27 @@
|
|||||||
|
package net.woggioni.rbcs.server.memcache.client
|
||||||
|
|
||||||
|
import io.netty.buffer.ByteBufUtil
|
||||||
|
import io.netty.buffer.Unpooled
|
||||||
|
import org.junit.jupiter.api.Assertions
|
||||||
|
import org.junit.jupiter.api.Test
|
||||||
|
import java.io.ByteArrayInputStream
|
||||||
|
import java.nio.ByteBuffer
|
||||||
|
import java.nio.channels.Channels
|
||||||
|
import kotlin.random.Random
|
||||||
|
|
||||||
|
class ByteBufferTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
fun test() {
|
||||||
|
val byteBuffer = ByteBuffer.allocate(0x100)
|
||||||
|
val originalBytes = Random(101325).nextBytes(0x100)
|
||||||
|
Channels.newChannel(ByteArrayInputStream(originalBytes)).use { source ->
|
||||||
|
source.read(byteBuffer)
|
||||||
|
}
|
||||||
|
byteBuffer.flip()
|
||||||
|
val buf = Unpooled.buffer()
|
||||||
|
buf.writeBytes(byteBuffer)
|
||||||
|
val finalBytes = ByteBufUtil.getBytes(buf)
|
||||||
|
Assertions.assertArrayEquals(originalBytes, finalBytes)
|
||||||
|
}
|
||||||
|
}
|
@@ -9,6 +9,9 @@ dependencies {
|
|||||||
implementation catalog.jwo
|
implementation catalog.jwo
|
||||||
implementation catalog.slf4j.api
|
implementation catalog.slf4j.api
|
||||||
implementation catalog.netty.codec.http
|
implementation catalog.netty.codec.http
|
||||||
|
implementation catalog.netty.handler
|
||||||
|
implementation catalog.netty.buffer
|
||||||
|
implementation catalog.netty.transport
|
||||||
|
|
||||||
api project(':rbcs-common')
|
api project(':rbcs-common')
|
||||||
api project(':rbcs-api')
|
api project(':rbcs-api')
|
||||||
@@ -36,3 +39,4 @@ publishing {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@@ -1,30 +0,0 @@
|
|||||||
package net.woggioni.rbcs.server
|
|
||||||
|
|
||||||
import io.netty.channel.ChannelHandlerContext
|
|
||||||
import org.slf4j.Logger
|
|
||||||
import java.net.InetSocketAddress
|
|
||||||
|
|
||||||
inline fun Logger.trace(ctx : ChannelHandlerContext, messageBuilder : () -> String) {
|
|
||||||
log(this, ctx, { isTraceEnabled }, { trace(it) } , messageBuilder)
|
|
||||||
}
|
|
||||||
inline fun Logger.debug(ctx : ChannelHandlerContext, messageBuilder : () -> String) {
|
|
||||||
log(this, ctx, { isDebugEnabled }, { debug(it) } , messageBuilder)
|
|
||||||
}
|
|
||||||
inline fun Logger.info(ctx : ChannelHandlerContext, messageBuilder : () -> String) {
|
|
||||||
log(this, ctx, { isInfoEnabled }, { info(it) } , messageBuilder)
|
|
||||||
}
|
|
||||||
inline fun Logger.warn(ctx : ChannelHandlerContext, messageBuilder : () -> String) {
|
|
||||||
log(this, ctx, { isWarnEnabled }, { warn(it) } , messageBuilder)
|
|
||||||
}
|
|
||||||
inline fun Logger.error(ctx : ChannelHandlerContext, messageBuilder : () -> String) {
|
|
||||||
log(this, ctx, { isErrorEnabled }, { error(it) } , messageBuilder)
|
|
||||||
}
|
|
||||||
|
|
||||||
inline fun log(log : Logger, ctx : ChannelHandlerContext,
|
|
||||||
filter : Logger.() -> Boolean,
|
|
||||||
loggerMethod : Logger.(String) -> Unit, messageBuilder : () -> String) {
|
|
||||||
if(log.filter()) {
|
|
||||||
val clientAddress = (ctx.channel().remoteAddress() as InetSocketAddress).address.hostAddress
|
|
||||||
log.loggerMethod(clientAddress + " - " + messageBuilder())
|
|
||||||
}
|
|
||||||
}
|
|
@@ -16,7 +16,6 @@ import io.netty.handler.codec.compression.CompressionOptions
|
|||||||
import io.netty.handler.codec.http.DefaultHttpContent
|
import io.netty.handler.codec.http.DefaultHttpContent
|
||||||
import io.netty.handler.codec.http.HttpContentCompressor
|
import io.netty.handler.codec.http.HttpContentCompressor
|
||||||
import io.netty.handler.codec.http.HttpHeaderNames
|
import io.netty.handler.codec.http.HttpHeaderNames
|
||||||
import io.netty.handler.codec.http.HttpObjectAggregator
|
|
||||||
import io.netty.handler.codec.http.HttpRequest
|
import io.netty.handler.codec.http.HttpRequest
|
||||||
import io.netty.handler.codec.http.HttpServerCodec
|
import io.netty.handler.codec.http.HttpServerCodec
|
||||||
import io.netty.handler.ssl.ClientAuth
|
import io.netty.handler.ssl.ClientAuth
|
||||||
@@ -30,13 +29,15 @@ import io.netty.handler.timeout.IdleStateHandler
|
|||||||
import io.netty.util.AttributeKey
|
import io.netty.util.AttributeKey
|
||||||
import io.netty.util.concurrent.DefaultEventExecutorGroup
|
import io.netty.util.concurrent.DefaultEventExecutorGroup
|
||||||
import io.netty.util.concurrent.EventExecutorGroup
|
import io.netty.util.concurrent.EventExecutorGroup
|
||||||
|
import net.woggioni.jwo.JWO
|
||||||
|
import net.woggioni.jwo.Tuple2
|
||||||
import net.woggioni.rbcs.api.Configuration
|
import net.woggioni.rbcs.api.Configuration
|
||||||
import net.woggioni.rbcs.api.exception.ConfigurationException
|
import net.woggioni.rbcs.api.exception.ConfigurationException
|
||||||
import net.woggioni.rbcs.common.RBCS.toUrl
|
|
||||||
import net.woggioni.rbcs.common.PasswordSecurity.decodePasswordHash
|
import net.woggioni.rbcs.common.PasswordSecurity.decodePasswordHash
|
||||||
import net.woggioni.rbcs.common.PasswordSecurity.hashPassword
|
import net.woggioni.rbcs.common.PasswordSecurity.hashPassword
|
||||||
|
import net.woggioni.rbcs.common.RBCS.toUrl
|
||||||
import net.woggioni.rbcs.common.Xml
|
import net.woggioni.rbcs.common.Xml
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.createLogger
|
||||||
import net.woggioni.rbcs.common.debug
|
import net.woggioni.rbcs.common.debug
|
||||||
import net.woggioni.rbcs.common.info
|
import net.woggioni.rbcs.common.info
|
||||||
import net.woggioni.rbcs.server.auth.AbstractNettyHttpAuthenticator
|
import net.woggioni.rbcs.server.auth.AbstractNettyHttpAuthenticator
|
||||||
@@ -46,10 +47,11 @@ import net.woggioni.rbcs.server.auth.RoleAuthorizer
|
|||||||
import net.woggioni.rbcs.server.configuration.Parser
|
import net.woggioni.rbcs.server.configuration.Parser
|
||||||
import net.woggioni.rbcs.server.configuration.Serializer
|
import net.woggioni.rbcs.server.configuration.Serializer
|
||||||
import net.woggioni.rbcs.server.exception.ExceptionHandler
|
import net.woggioni.rbcs.server.exception.ExceptionHandler
|
||||||
|
import net.woggioni.rbcs.server.handler.MaxRequestSizeHandler
|
||||||
import net.woggioni.rbcs.server.handler.ServerHandler
|
import net.woggioni.rbcs.server.handler.ServerHandler
|
||||||
|
import net.woggioni.rbcs.server.handler.TraceHandler
|
||||||
|
import net.woggioni.rbcs.server.throttling.BucketManager
|
||||||
import net.woggioni.rbcs.server.throttling.ThrottlingHandler
|
import net.woggioni.rbcs.server.throttling.ThrottlingHandler
|
||||||
import net.woggioni.jwo.JWO
|
|
||||||
import net.woggioni.jwo.Tuple2
|
|
||||||
import java.io.OutputStream
|
import java.io.OutputStream
|
||||||
import java.net.InetSocketAddress
|
import java.net.InetSocketAddress
|
||||||
import java.nio.file.Files
|
import java.nio.file.Files
|
||||||
@@ -57,18 +59,23 @@ import java.nio.file.Path
|
|||||||
import java.security.KeyStore
|
import java.security.KeyStore
|
||||||
import java.security.PrivateKey
|
import java.security.PrivateKey
|
||||||
import java.security.cert.X509Certificate
|
import java.security.cert.X509Certificate
|
||||||
|
import java.time.Duration
|
||||||
|
import java.time.Instant
|
||||||
import java.util.Arrays
|
import java.util.Arrays
|
||||||
import java.util.Base64
|
import java.util.Base64
|
||||||
|
import java.util.concurrent.CompletableFuture
|
||||||
|
import java.util.concurrent.Future
|
||||||
import java.util.concurrent.TimeUnit
|
import java.util.concurrent.TimeUnit
|
||||||
|
import java.util.concurrent.TimeoutException
|
||||||
import java.util.regex.Matcher
|
import java.util.regex.Matcher
|
||||||
import java.util.regex.Pattern
|
import java.util.regex.Pattern
|
||||||
import javax.naming.ldap.LdapName
|
import javax.naming.ldap.LdapName
|
||||||
import javax.net.ssl.SSLPeerUnverifiedException
|
import javax.net.ssl.SSLPeerUnverifiedException
|
||||||
|
|
||||||
class RemoteBuildCacheServer(private val cfg: Configuration) {
|
class RemoteBuildCacheServer(private val cfg: Configuration) {
|
||||||
private val log = contextLogger()
|
|
||||||
|
|
||||||
companion object {
|
companion object {
|
||||||
|
private val log = createLogger<RemoteBuildCacheServer>()
|
||||||
|
|
||||||
val userAttribute: AttributeKey<Configuration.User> = AttributeKey.valueOf("user")
|
val userAttribute: AttributeKey<Configuration.User> = AttributeKey.valueOf("user")
|
||||||
val groupAttribute: AttributeKey<Set<Configuration.Group>> = AttributeKey.valueOf("group")
|
val groupAttribute: AttributeKey<Set<Configuration.Group>> = AttributeKey.valueOf("group")
|
||||||
@@ -128,11 +135,12 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
val clientCertificate = peerCertificates.first() as X509Certificate
|
val clientCertificate = peerCertificates.first() as X509Certificate
|
||||||
val user = userExtractor?.extract(clientCertificate)
|
val user = userExtractor?.extract(clientCertificate)
|
||||||
val group = groupExtractor?.extract(clientCertificate)
|
val group = groupExtractor?.extract(clientCertificate)
|
||||||
val allGroups = ((user?.groups ?: emptySet()).asSequence() + sequenceOf(group).filterNotNull()).toSet()
|
val allGroups =
|
||||||
|
((user?.groups ?: emptySet()).asSequence() + sequenceOf(group).filterNotNull()).toSet()
|
||||||
AuthenticationResult(user, allGroups)
|
AuthenticationResult(user, allGroups)
|
||||||
} ?: anonymousUserGroups?.let{ AuthenticationResult(null, it) }
|
} ?: anonymousUserGroups?.let { AuthenticationResult(null, it) }
|
||||||
} catch (es: SSLPeerUnverifiedException) {
|
} catch (es: SSLPeerUnverifiedException) {
|
||||||
anonymousUserGroups?.let{ AuthenticationResult(null, it) }
|
anonymousUserGroups?.let { AuthenticationResult(null, it) }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -141,7 +149,9 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
private class NettyHttpBasicAuthenticator(
|
private class NettyHttpBasicAuthenticator(
|
||||||
private val users: Map<String, Configuration.User>, authorizer: Authorizer
|
private val users: Map<String, Configuration.User>, authorizer: Authorizer
|
||||||
) : AbstractNettyHttpAuthenticator(authorizer) {
|
) : AbstractNettyHttpAuthenticator(authorizer) {
|
||||||
private val log = contextLogger()
|
companion object {
|
||||||
|
private val log = createLogger<NettyHttpBasicAuthenticator>()
|
||||||
|
}
|
||||||
|
|
||||||
override fun authenticate(ctx: ChannelHandlerContext, req: HttpRequest): AuthenticationResult? {
|
override fun authenticate(ctx: ChannelHandlerContext, req: HttpRequest): AuthenticationResult? {
|
||||||
val authorizationHeader = req.headers()[HttpHeaderNames.AUTHORIZATION] ?: let {
|
val authorizationHeader = req.headers()[HttpHeaderNames.AUTHORIZATION] ?: let {
|
||||||
@@ -191,7 +201,7 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
private class ServerInitializer(
|
private class ServerInitializer(
|
||||||
private val cfg: Configuration,
|
private val cfg: Configuration,
|
||||||
private val eventExecutorGroup: EventExecutorGroup
|
private val eventExecutorGroup: EventExecutorGroup
|
||||||
) : ChannelInitializer<Channel>() {
|
) : ChannelInitializer<Channel>(), AutoCloseable {
|
||||||
|
|
||||||
companion object {
|
companion object {
|
||||||
private fun createSslCtx(tls: Configuration.Tls): SslContext {
|
private fun createSslCtx(tls: Configuration.Tls): SslContext {
|
||||||
@@ -213,7 +223,7 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
trustManager(
|
trustManager(
|
||||||
ClientCertificateValidator.getTrustManager(ts, trustStore.isCheckCertificateStatus)
|
ClientCertificateValidator.getTrustManager(ts, trustStore.isCheckCertificateStatus)
|
||||||
)
|
)
|
||||||
if(trustStore.isRequireClientCertificate) ClientAuth.REQUIRE
|
if (trustStore.isRequireClientCertificate) ClientAuth.REQUIRE
|
||||||
else ClientAuth.OPTIONAL
|
else ClientAuth.OPTIONAL
|
||||||
} ?: ClientAuth.NONE
|
} ?: ClientAuth.NONE
|
||||||
clientAuth(clientAuth)
|
clientAuth(clientAuth)
|
||||||
@@ -241,18 +251,13 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
}
|
}
|
||||||
return keystore
|
return keystore
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private val log = createLogger<ServerInitializer>()
|
||||||
}
|
}
|
||||||
|
|
||||||
private val log = contextLogger()
|
private val cacheHandlerFactory = cfg.cache.materialize()
|
||||||
|
|
||||||
private val serverHandler = let {
|
private val bucketManager = BucketManager.from(cfg)
|
||||||
val cacheImplementation = cfg.cache.materialize()
|
|
||||||
val prefix = Path.of("/").resolve(Path.of(cfg.serverPath ?: "/"))
|
|
||||||
ServerHandler(cacheImplementation, prefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
private val exceptionHandler = ExceptionHandler()
|
|
||||||
private val throttlingHandler = ThrottlingHandler(cfg)
|
|
||||||
|
|
||||||
private val authenticator = when (val auth = cfg.authentication) {
|
private val authenticator = when (val auth = cfg.authentication) {
|
||||||
is Configuration.BasicAuthentication -> NettyHttpBasicAuthenticator(cfg.users, RoleAuthorizer())
|
is Configuration.BasicAuthentication -> NettyHttpBasicAuthenticator(cfg.users, RoleAuthorizer())
|
||||||
@@ -311,7 +316,7 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
cfg.connection.also { conn ->
|
cfg.connection.also { conn ->
|
||||||
val readTimeout = conn.readTimeout.toMillis()
|
val readTimeout = conn.readTimeout.toMillis()
|
||||||
val writeTimeout = conn.writeTimeout.toMillis()
|
val writeTimeout = conn.writeTimeout.toMillis()
|
||||||
if(readTimeout > 0 || writeTimeout > 0) {
|
if (readTimeout > 0 || writeTimeout > 0) {
|
||||||
pipeline.addLast(
|
pipeline.addLast(
|
||||||
IdleStateHandler(
|
IdleStateHandler(
|
||||||
false,
|
false,
|
||||||
@@ -325,7 +330,7 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
val readIdleTimeout = conn.readIdleTimeout.toMillis()
|
val readIdleTimeout = conn.readIdleTimeout.toMillis()
|
||||||
val writeIdleTimeout = conn.writeIdleTimeout.toMillis()
|
val writeIdleTimeout = conn.writeIdleTimeout.toMillis()
|
||||||
val idleTimeout = conn.idleTimeout.toMillis()
|
val idleTimeout = conn.idleTimeout.toMillis()
|
||||||
if(readIdleTimeout > 0 || writeIdleTimeout > 0 || idleTimeout > 0) {
|
if (readIdleTimeout > 0 || writeIdleTimeout > 0 || idleTimeout > 0) {
|
||||||
pipeline.addLast(
|
pipeline.addLast(
|
||||||
IdleStateHandler(
|
IdleStateHandler(
|
||||||
true,
|
true,
|
||||||
@@ -340,16 +345,19 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
pipeline.addLast(object : ChannelInboundHandlerAdapter() {
|
pipeline.addLast(object : ChannelInboundHandlerAdapter() {
|
||||||
override fun userEventTriggered(ctx: ChannelHandlerContext, evt: Any) {
|
override fun userEventTriggered(ctx: ChannelHandlerContext, evt: Any) {
|
||||||
if (evt is IdleStateEvent) {
|
if (evt is IdleStateEvent) {
|
||||||
when(evt.state()) {
|
when (evt.state()) {
|
||||||
IdleState.READER_IDLE -> log.debug {
|
IdleState.READER_IDLE -> log.debug {
|
||||||
"Read timeout reached on channel ${ch.id().asShortText()}, closing the connection"
|
"Read timeout reached on channel ${ch.id().asShortText()}, closing the connection"
|
||||||
}
|
}
|
||||||
|
|
||||||
IdleState.WRITER_IDLE -> log.debug {
|
IdleState.WRITER_IDLE -> log.debug {
|
||||||
"Write timeout reached on channel ${ch.id().asShortText()}, closing the connection"
|
"Write timeout reached on channel ${ch.id().asShortText()}, closing the connection"
|
||||||
}
|
}
|
||||||
|
|
||||||
IdleState.ALL_IDLE -> log.debug {
|
IdleState.ALL_IDLE -> log.debug {
|
||||||
"Idle timeout reached on channel ${ch.id().asShortText()}, closing the connection"
|
"Idle timeout reached on channel ${ch.id().asShortText()}, closing the connection"
|
||||||
}
|
}
|
||||||
|
|
||||||
null -> throw IllegalStateException("This should never happen")
|
null -> throw IllegalStateException("This should never happen")
|
||||||
}
|
}
|
||||||
ctx.close()
|
ctx.close()
|
||||||
@@ -360,40 +368,94 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
pipeline.addLast(SSL_HANDLER_NAME, it)
|
pipeline.addLast(SSL_HANDLER_NAME, it)
|
||||||
}
|
}
|
||||||
pipeline.addLast(HttpServerCodec())
|
pipeline.addLast(HttpServerCodec())
|
||||||
|
pipeline.addLast(MaxRequestSizeHandler.NAME, MaxRequestSizeHandler(cfg.connection.maxRequestSize))
|
||||||
pipeline.addLast(HttpChunkContentCompressor(1024))
|
pipeline.addLast(HttpChunkContentCompressor(1024))
|
||||||
pipeline.addLast(ChunkedWriteHandler())
|
pipeline.addLast(ChunkedWriteHandler())
|
||||||
pipeline.addLast(HttpObjectAggregator(cfg.connection.maxRequestSize))
|
|
||||||
authenticator?.let {
|
authenticator?.let {
|
||||||
pipeline.addLast(it)
|
pipeline.addLast(it)
|
||||||
}
|
}
|
||||||
pipeline.addLast(throttlingHandler)
|
pipeline.addLast(ThrottlingHandler(bucketManager, cfg.connection))
|
||||||
pipeline.addLast(eventExecutorGroup, serverHandler)
|
|
||||||
pipeline.addLast(exceptionHandler)
|
val serverHandler = let {
|
||||||
|
val prefix = Path.of("/").resolve(Path.of(cfg.serverPath ?: "/"))
|
||||||
|
ServerHandler(prefix)
|
||||||
|
}
|
||||||
|
pipeline.addLast(eventExecutorGroup, ServerHandler.NAME, serverHandler)
|
||||||
|
pipeline.addLast(cacheHandlerFactory.newHandler())
|
||||||
|
pipeline.addLast(TraceHandler)
|
||||||
|
pipeline.addLast(ExceptionHandler)
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun close() {
|
||||||
|
cacheHandlerFactory.close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
class ServerHandle(
|
class ServerHandle(
|
||||||
httpChannelFuture: ChannelFuture,
|
closeFuture: ChannelFuture,
|
||||||
private val executorGroups: Iterable<EventExecutorGroup>
|
private val bossGroup: EventExecutorGroup,
|
||||||
) : AutoCloseable {
|
private val executorGroups: Iterable<EventExecutorGroup>,
|
||||||
private val httpChannel: Channel = httpChannelFuture.channel()
|
private val serverInitializer: AutoCloseable,
|
||||||
private val closeFuture: ChannelFuture = httpChannel.closeFuture()
|
) : Future<Void> by from(closeFuture, executorGroups, serverInitializer) {
|
||||||
private val log = contextLogger()
|
|
||||||
|
|
||||||
fun shutdown(): ChannelFuture {
|
companion object {
|
||||||
return httpChannel.close()
|
private val log = createLogger<ServerHandle>()
|
||||||
}
|
|
||||||
|
|
||||||
override fun close() {
|
private fun from(
|
||||||
try {
|
closeFuture: ChannelFuture,
|
||||||
closeFuture.sync()
|
executorGroups: Iterable<EventExecutorGroup>,
|
||||||
} finally {
|
serverInitializer: AutoCloseable
|
||||||
executorGroups.forEach {
|
): CompletableFuture<Void> {
|
||||||
it.shutdownGracefully().sync()
|
val result = CompletableFuture<Void>()
|
||||||
|
closeFuture.addListener {
|
||||||
|
val errors = mutableListOf<Throwable>()
|
||||||
|
val deadline = Instant.now().plusSeconds(20)
|
||||||
|
|
||||||
|
|
||||||
|
for (executorGroup in executorGroups) {
|
||||||
|
val future = executorGroup.terminationFuture()
|
||||||
|
try {
|
||||||
|
val now = Instant.now()
|
||||||
|
if (now > deadline) {
|
||||||
|
future.get(0, TimeUnit.SECONDS)
|
||||||
|
} else {
|
||||||
|
future.get(Duration.between(now, deadline).toMillis(), TimeUnit.MILLISECONDS)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
catch (te: TimeoutException) {
|
||||||
|
errors.addLast(te)
|
||||||
|
log.warn("Timeout while waiting for shutdown of $executorGroup", te)
|
||||||
|
} catch (ex: Throwable) {
|
||||||
|
log.warn(ex.message, ex)
|
||||||
|
errors.addLast(ex)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
serverInitializer.close()
|
||||||
|
} catch (ex: Throwable) {
|
||||||
|
log.error(ex.message, ex)
|
||||||
|
errors.addLast(ex)
|
||||||
|
}
|
||||||
|
if(errors.isEmpty()) {
|
||||||
|
result.complete(null)
|
||||||
|
} else {
|
||||||
|
result.completeExceptionally(errors.first())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.thenAccept {
|
||||||
|
log.info {
|
||||||
|
"RemoteBuildCacheServer has been gracefully shut down"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.info {
|
}
|
||||||
"RemoteBuildCacheServer has been gracefully shut down"
|
|
||||||
|
|
||||||
|
fun sendShutdownSignal() {
|
||||||
|
bossGroup.shutdownGracefully()
|
||||||
|
executorGroups.map {
|
||||||
|
it.shutdownGracefully()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -411,11 +473,12 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
}
|
}
|
||||||
DefaultEventExecutorGroup(Runtime.getRuntime().availableProcessors(), threadFactory)
|
DefaultEventExecutorGroup(Runtime.getRuntime().availableProcessors(), threadFactory)
|
||||||
}
|
}
|
||||||
|
val serverInitializer = ServerInitializer(cfg, eventExecutorGroup)
|
||||||
val bootstrap = ServerBootstrap().apply {
|
val bootstrap = ServerBootstrap().apply {
|
||||||
// Configure the server
|
// Configure the server
|
||||||
group(bossGroup, workerGroup)
|
group(bossGroup, workerGroup)
|
||||||
channel(serverSocketChannel)
|
channel(serverSocketChannel)
|
||||||
childHandler(ServerInitializer(cfg, eventExecutorGroup))
|
childHandler(serverInitializer)
|
||||||
option(ChannelOption.SO_BACKLOG, cfg.incomingConnectionsBacklogSize)
|
option(ChannelOption.SO_BACKLOG, cfg.incomingConnectionsBacklogSize)
|
||||||
childOption(ChannelOption.SO_KEEPALIVE, true)
|
childOption(ChannelOption.SO_KEEPALIVE, true)
|
||||||
}
|
}
|
||||||
@@ -423,10 +486,16 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
|
|||||||
|
|
||||||
// Bind and start to accept incoming connections.
|
// Bind and start to accept incoming connections.
|
||||||
val bindAddress = InetSocketAddress(cfg.host, cfg.port)
|
val bindAddress = InetSocketAddress(cfg.host, cfg.port)
|
||||||
val httpChannel = bootstrap.bind(bindAddress).sync()
|
val httpChannel = bootstrap.bind(bindAddress).sync().channel()
|
||||||
log.info {
|
log.info {
|
||||||
"RemoteBuildCacheServer is listening on ${cfg.host}:${cfg.port}"
|
"RemoteBuildCacheServer is listening on ${cfg.host}:${cfg.port}"
|
||||||
}
|
}
|
||||||
return ServerHandle(httpChannel, setOf(bossGroup, workerGroup, eventExecutorGroup))
|
|
||||||
|
return ServerHandle(
|
||||||
|
httpChannel.closeFuture(),
|
||||||
|
bossGroup,
|
||||||
|
setOf(workerGroup, eventExecutorGroup),
|
||||||
|
serverInitializer
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -6,6 +6,7 @@ import io.netty.channel.ChannelHandlerContext
|
|||||||
import io.netty.channel.ChannelInboundHandlerAdapter
|
import io.netty.channel.ChannelInboundHandlerAdapter
|
||||||
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
||||||
import io.netty.handler.codec.http.FullHttpResponse
|
import io.netty.handler.codec.http.FullHttpResponse
|
||||||
|
import io.netty.handler.codec.http.HttpContent
|
||||||
import io.netty.handler.codec.http.HttpHeaderNames
|
import io.netty.handler.codec.http.HttpHeaderNames
|
||||||
import io.netty.handler.codec.http.HttpRequest
|
import io.netty.handler.codec.http.HttpRequest
|
||||||
import io.netty.handler.codec.http.HttpResponseStatus
|
import io.netty.handler.codec.http.HttpResponseStatus
|
||||||
@@ -57,6 +58,8 @@ abstract class AbstractNettyHttpAuthenticator(private val authorizer: Authorizer
|
|||||||
} else {
|
} else {
|
||||||
authorizationFailure(ctx, msg)
|
authorizationFailure(ctx, msg)
|
||||||
}
|
}
|
||||||
|
} else if(msg is HttpContent) {
|
||||||
|
ctx.fireChannelRead(msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1,12 +1,14 @@
|
|||||||
package net.woggioni.rbcs.server.cache
|
package net.woggioni.rbcs.server.cache
|
||||||
|
|
||||||
import io.netty.buffer.ByteBuf
|
|
||||||
import net.woggioni.rbcs.api.Cache
|
|
||||||
import net.woggioni.rbcs.common.ByteBufInputStream
|
|
||||||
import net.woggioni.rbcs.common.RBCS.digestString
|
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
|
||||||
import net.woggioni.jwo.JWO
|
import net.woggioni.jwo.JWO
|
||||||
import net.woggioni.jwo.LockFile
|
import net.woggioni.rbcs.api.CacheValueMetadata
|
||||||
|
import net.woggioni.rbcs.common.createLogger
|
||||||
|
import java.io.ByteArrayOutputStream
|
||||||
|
import java.io.InputStream
|
||||||
|
import java.io.ObjectInputStream
|
||||||
|
import java.io.ObjectOutputStream
|
||||||
|
import java.io.Serializable
|
||||||
|
import java.nio.ByteBuffer
|
||||||
import java.nio.channels.Channels
|
import java.nio.channels.Channels
|
||||||
import java.nio.channels.FileChannel
|
import java.nio.channels.FileChannel
|
||||||
import java.nio.file.Files
|
import java.nio.file.Files
|
||||||
@@ -14,117 +16,143 @@ import java.nio.file.Path
|
|||||||
import java.nio.file.StandardCopyOption
|
import java.nio.file.StandardCopyOption
|
||||||
import java.nio.file.StandardOpenOption
|
import java.nio.file.StandardOpenOption
|
||||||
import java.nio.file.attribute.BasicFileAttributes
|
import java.nio.file.attribute.BasicFileAttributes
|
||||||
import java.security.MessageDigest
|
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
import java.time.Instant
|
import java.time.Instant
|
||||||
import java.util.concurrent.CompletableFuture
|
|
||||||
import java.util.concurrent.atomic.AtomicReference
|
|
||||||
import java.util.zip.Deflater
|
|
||||||
import java.util.zip.DeflaterOutputStream
|
|
||||||
import java.util.zip.Inflater
|
|
||||||
import java.util.zip.InflaterInputStream
|
|
||||||
|
|
||||||
class FileSystemCache(
|
class FileSystemCache(
|
||||||
val root: Path,
|
val root: Path,
|
||||||
val maxAge: Duration,
|
val maxAge: Duration
|
||||||
val digestAlgorithm: String?,
|
) : AutoCloseable {
|
||||||
val compressionEnabled: Boolean,
|
|
||||||
val compressionLevel: Int
|
class EntryValue(val metadata: CacheValueMetadata, val channel : FileChannel, val offset : Long, val size : Long) : Serializable
|
||||||
) : Cache {
|
|
||||||
|
|
||||||
private companion object {
|
private companion object {
|
||||||
@JvmStatic
|
private val log = createLogger<FileSystemCache>()
|
||||||
private val log = contextLogger()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
init {
|
init {
|
||||||
Files.createDirectories(root)
|
Files.createDirectories(root)
|
||||||
}
|
}
|
||||||
|
|
||||||
private var nextGc = AtomicReference(Instant.now().plus(maxAge))
|
@Volatile
|
||||||
|
private var running = true
|
||||||
|
|
||||||
override fun get(key: String) = (digestAlgorithm
|
private var nextGc = Instant.now()
|
||||||
?.let(MessageDigest::getInstance)
|
|
||||||
?.let { md ->
|
fun get(key: String): EntryValue? =
|
||||||
digestString(key.toByteArray(), md)
|
root.resolve(key).takeIf(Files::exists)
|
||||||
} ?: key).let { digest ->
|
|
||||||
root.resolve(digest).takeIf(Files::exists)
|
|
||||||
?.let { file ->
|
?.let { file ->
|
||||||
file.takeIf(Files::exists)?.let { file ->
|
val size = Files.size(file)
|
||||||
if (compressionEnabled) {
|
val channel = FileChannel.open(file, StandardOpenOption.READ)
|
||||||
val inflater = Inflater()
|
val source = Channels.newInputStream(channel)
|
||||||
Channels.newChannel(
|
val tmp = ByteArray(Integer.BYTES)
|
||||||
InflaterInputStream(
|
val buffer = ByteBuffer.wrap(tmp)
|
||||||
Channels.newInputStream(
|
source.read(tmp)
|
||||||
FileChannel.open(
|
buffer.rewind()
|
||||||
file,
|
val offset = (Integer.BYTES + buffer.getInt()).toLong()
|
||||||
StandardOpenOption.READ
|
var count = 0
|
||||||
)
|
val wrapper = object : InputStream() {
|
||||||
), inflater
|
override fun read(): Int {
|
||||||
)
|
return source.read().also {
|
||||||
)
|
if (it > 0) count += it
|
||||||
} else {
|
}
|
||||||
FileChannel.open(file, StandardOpenOption.READ)
|
}
|
||||||
|
|
||||||
|
override fun read(b: ByteArray, off: Int, len: Int): Int {
|
||||||
|
return source.read(b, off, len).also {
|
||||||
|
if (it > 0) count += it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun close() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}.also {
|
val metadata = ObjectInputStream(wrapper).use { ois ->
|
||||||
gc()
|
ois.readObject() as CacheValueMetadata
|
||||||
}.let {
|
}
|
||||||
CompletableFuture.completedFuture(it)
|
EntryValue(metadata, channel, offset, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class FileSink(metadata: CacheValueMetadata, private val path: Path, private val tmpFile: Path) {
|
||||||
|
val channel: FileChannel
|
||||||
|
|
||||||
|
init {
|
||||||
|
val baos = ByteArrayOutputStream()
|
||||||
|
ObjectOutputStream(baos).use {
|
||||||
|
it.writeObject(metadata)
|
||||||
|
}
|
||||||
|
Files.newOutputStream(tmpFile).use {
|
||||||
|
val bytes = baos.toByteArray()
|
||||||
|
val buffer = ByteBuffer.allocate(Integer.BYTES)
|
||||||
|
buffer.putInt(bytes.size)
|
||||||
|
buffer.rewind()
|
||||||
|
it.write(buffer.array())
|
||||||
|
it.write(bytes)
|
||||||
|
}
|
||||||
|
channel = FileChannel.open(tmpFile, StandardOpenOption.APPEND)
|
||||||
|
}
|
||||||
|
|
||||||
|
fun commit() {
|
||||||
|
channel.close()
|
||||||
|
Files.move(tmpFile, path, StandardCopyOption.ATOMIC_MOVE)
|
||||||
|
}
|
||||||
|
|
||||||
|
fun rollback() {
|
||||||
|
channel.close()
|
||||||
|
Files.delete(path)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun put(key: String, content: ByteBuf): CompletableFuture<Void> {
|
fun put(
|
||||||
(digestAlgorithm
|
key: String,
|
||||||
?.let(MessageDigest::getInstance)
|
metadata: CacheValueMetadata,
|
||||||
?.let { md ->
|
): FileSink {
|
||||||
digestString(key.toByteArray(), md)
|
val file = root.resolve(key)
|
||||||
} ?: key).let { digest ->
|
val tmpFile = Files.createTempFile(root, null, ".tmp")
|
||||||
val file = root.resolve(digest)
|
return FileSink(metadata, file, tmpFile)
|
||||||
val tmpFile = Files.createTempFile(root, null, ".tmp")
|
}
|
||||||
try {
|
|
||||||
Files.newOutputStream(tmpFile).let {
|
private val garbageCollector = Thread.ofVirtual().name("file-system-cache-gc").start {
|
||||||
if (compressionEnabled) {
|
while (running) {
|
||||||
val deflater = Deflater(compressionLevel)
|
|
||||||
DeflaterOutputStream(it, deflater)
|
|
||||||
} else {
|
|
||||||
it
|
|
||||||
}
|
|
||||||
}.use {
|
|
||||||
JWO.copy(ByteBufInputStream(content), it)
|
|
||||||
}
|
|
||||||
Files.move(tmpFile, file, StandardCopyOption.ATOMIC_MOVE)
|
|
||||||
} catch (t: Throwable) {
|
|
||||||
Files.delete(tmpFile)
|
|
||||||
throw t
|
|
||||||
}
|
|
||||||
}.also {
|
|
||||||
gc()
|
gc()
|
||||||
}
|
}
|
||||||
return CompletableFuture.completedFuture(null)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun gc() {
|
private fun gc() {
|
||||||
val now = Instant.now()
|
val now = Instant.now()
|
||||||
val oldValue = nextGc.getAndSet(now.plus(maxAge))
|
if (nextGc < now) {
|
||||||
if (oldValue < now) {
|
val oldestEntry = actualGc(now)
|
||||||
actualGc(now)
|
nextGc = (oldestEntry ?: now).plus(maxAge)
|
||||||
}
|
}
|
||||||
|
Thread.sleep(minOf(Duration.between(now, nextGc), Duration.ofSeconds(1)))
|
||||||
}
|
}
|
||||||
|
|
||||||
@Synchronized
|
/**
|
||||||
private fun actualGc(now: Instant) {
|
* Returns the creation timestamp of the oldest cache entry (if any)
|
||||||
Files.list(root).filter {
|
*/
|
||||||
val creationTimeStamp = Files.readAttributes(it, BasicFileAttributes::class.java)
|
private fun actualGc(now: Instant): Instant? {
|
||||||
.creationTime()
|
var result: Instant? = null
|
||||||
.toInstant()
|
Files.list(root)
|
||||||
now > creationTimeStamp.plus(maxAge)
|
.filter { path ->
|
||||||
}.forEach { file ->
|
JWO.splitExtension(path)
|
||||||
LockFile.acquire(file, false).use {
|
.map { it._2 }
|
||||||
Files.delete(file)
|
.map { it != ".tmp" }
|
||||||
|
.orElse(true)
|
||||||
}
|
}
|
||||||
}
|
.filter {
|
||||||
|
val creationTimeStamp = Files.readAttributes(it, BasicFileAttributes::class.java)
|
||||||
|
.creationTime()
|
||||||
|
.toInstant()
|
||||||
|
if (result == null || creationTimeStamp < result) {
|
||||||
|
result = creationTimeStamp
|
||||||
|
}
|
||||||
|
now > creationTimeStamp.plus(maxAge)
|
||||||
|
}.forEach(Files::delete)
|
||||||
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun close() {}
|
override fun close() {
|
||||||
|
running = false
|
||||||
|
garbageCollector.join()
|
||||||
|
}
|
||||||
}
|
}
|
@@ -1,8 +1,9 @@
|
|||||||
package net.woggioni.rbcs.server.cache
|
package net.woggioni.rbcs.server.cache
|
||||||
|
|
||||||
|
import net.woggioni.jwo.Application
|
||||||
|
import net.woggioni.rbcs.api.CacheHandlerFactory
|
||||||
import net.woggioni.rbcs.api.Configuration
|
import net.woggioni.rbcs.api.Configuration
|
||||||
import net.woggioni.rbcs.common.RBCS
|
import net.woggioni.rbcs.common.RBCS
|
||||||
import net.woggioni.jwo.Application
|
|
||||||
import java.nio.file.Path
|
import java.nio.file.Path
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
|
|
||||||
@@ -12,14 +13,18 @@ data class FileSystemCacheConfiguration(
|
|||||||
val digestAlgorithm : String?,
|
val digestAlgorithm : String?,
|
||||||
val compressionEnabled: Boolean,
|
val compressionEnabled: Boolean,
|
||||||
val compressionLevel: Int,
|
val compressionLevel: Int,
|
||||||
|
val chunkSize: Int,
|
||||||
) : Configuration.Cache {
|
) : Configuration.Cache {
|
||||||
override fun materialize() = FileSystemCache(
|
|
||||||
root ?: Application.builder("rbcs").build().computeCacheDirectory(),
|
override fun materialize() = object : CacheHandlerFactory {
|
||||||
maxAge,
|
private val cache = FileSystemCache(root ?: Application.builder("rbcs").build().computeCacheDirectory(), maxAge)
|
||||||
digestAlgorithm,
|
|
||||||
compressionEnabled,
|
override fun close() {
|
||||||
compressionLevel
|
cache.close()
|
||||||
)
|
}
|
||||||
|
|
||||||
|
override fun newHandler() = FileSystemCacheHandler(cache, digestAlgorithm, compressionEnabled, compressionLevel, chunkSize)
|
||||||
|
}
|
||||||
|
|
||||||
override fun getNamespaceURI() = RBCS.RBCS_NAMESPACE_URI
|
override fun getNamespaceURI() = RBCS.RBCS_NAMESPACE_URI
|
||||||
|
|
||||||
|
122
rbcs-server/src/main/kotlin/net/woggioni/rbcs/server/cache/FileSystemCacheHandler.kt
vendored
Normal file
122
rbcs-server/src/main/kotlin/net/woggioni/rbcs/server/cache/FileSystemCacheHandler.kt
vendored
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
package net.woggioni.rbcs.server.cache
|
||||||
|
|
||||||
|
import io.netty.buffer.ByteBuf
|
||||||
|
import io.netty.channel.ChannelHandlerContext
|
||||||
|
import io.netty.channel.SimpleChannelInboundHandler
|
||||||
|
import io.netty.handler.codec.http.LastHttpContent
|
||||||
|
import io.netty.handler.stream.ChunkedNioFile
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheContent
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheGetRequest
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutRequest
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueNotFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.LastCacheContent
|
||||||
|
import net.woggioni.rbcs.common.RBCS.processCacheKey
|
||||||
|
import java.nio.channels.Channels
|
||||||
|
import java.util.Base64
|
||||||
|
import java.util.zip.Deflater
|
||||||
|
import java.util.zip.DeflaterOutputStream
|
||||||
|
import java.util.zip.InflaterInputStream
|
||||||
|
|
||||||
|
class FileSystemCacheHandler(
|
||||||
|
private val cache: FileSystemCache,
|
||||||
|
private val digestAlgorithm: String?,
|
||||||
|
private val compressionEnabled: Boolean,
|
||||||
|
private val compressionLevel: Int,
|
||||||
|
private val chunkSize: Int
|
||||||
|
) : SimpleChannelInboundHandler<CacheMessage>() {
|
||||||
|
|
||||||
|
private inner class InProgressPutRequest(
|
||||||
|
val key : String,
|
||||||
|
private val fileSink : FileSystemCache.FileSink
|
||||||
|
) {
|
||||||
|
|
||||||
|
private val stream = Channels.newOutputStream(fileSink.channel).let {
|
||||||
|
if (compressionEnabled) {
|
||||||
|
DeflaterOutputStream(it, Deflater(compressionLevel))
|
||||||
|
} else {
|
||||||
|
it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fun write(buf: ByteBuf) {
|
||||||
|
buf.readBytes(stream, buf.readableBytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
fun commit() {
|
||||||
|
stream.close()
|
||||||
|
fileSink.commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
fun rollback() {
|
||||||
|
fileSink.rollback()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private var inProgressPutRequest: InProgressPutRequest? = null
|
||||||
|
|
||||||
|
override fun channelRead0(ctx: ChannelHandlerContext, msg: CacheMessage) {
|
||||||
|
when (msg) {
|
||||||
|
is CacheGetRequest -> handleGetRequest(ctx, msg)
|
||||||
|
is CachePutRequest -> handlePutRequest(ctx, msg)
|
||||||
|
is LastCacheContent -> handleLastCacheContent(ctx, msg)
|
||||||
|
is CacheContent -> handleCacheContent(ctx, msg)
|
||||||
|
else -> ctx.fireChannelRead(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleGetRequest(ctx: ChannelHandlerContext, msg: CacheGetRequest) {
|
||||||
|
val key = String(Base64.getUrlEncoder().encode(processCacheKey(msg.key, digestAlgorithm)))
|
||||||
|
cache.get(key)?.also { entryValue ->
|
||||||
|
ctx.writeAndFlush(CacheValueFoundResponse(msg.key, entryValue.metadata))
|
||||||
|
entryValue.channel.let { channel ->
|
||||||
|
if(compressionEnabled) {
|
||||||
|
InflaterInputStream(Channels.newInputStream(channel)).use { stream ->
|
||||||
|
|
||||||
|
outerLoop@
|
||||||
|
while (true) {
|
||||||
|
val buf = ctx.alloc().heapBuffer(chunkSize)
|
||||||
|
while(buf.readableBytes() < chunkSize) {
|
||||||
|
val read = buf.writeBytes(stream, chunkSize)
|
||||||
|
if(read < 0) {
|
||||||
|
ctx.writeAndFlush(LastCacheContent(buf))
|
||||||
|
break@outerLoop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctx.writeAndFlush(CacheContent(buf))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ctx.writeAndFlush(ChunkedNioFile(channel, entryValue.offset, entryValue.size - entryValue.offset, chunkSize))
|
||||||
|
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} ?: ctx.writeAndFlush(CacheValueNotFoundResponse())
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handlePutRequest(ctx: ChannelHandlerContext, msg: CachePutRequest) {
|
||||||
|
val key = String(Base64.getUrlEncoder().encode(processCacheKey(msg.key, digestAlgorithm)))
|
||||||
|
val sink = cache.put(key, msg.metadata)
|
||||||
|
inProgressPutRequest = InProgressPutRequest(msg.key, sink)
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleCacheContent(ctx: ChannelHandlerContext, msg: CacheContent) {
|
||||||
|
inProgressPutRequest!!.write(msg.content())
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleLastCacheContent(ctx: ChannelHandlerContext, msg: LastCacheContent) {
|
||||||
|
inProgressPutRequest?.let { request ->
|
||||||
|
inProgressPutRequest = null
|
||||||
|
request.write(msg.content())
|
||||||
|
request.commit()
|
||||||
|
ctx.writeAndFlush(CachePutResponse(request.key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
||||||
|
inProgressPutRequest?.rollback()
|
||||||
|
super.exceptionCaught(ctx, cause)
|
||||||
|
}
|
||||||
|
}
|
@@ -31,13 +31,17 @@ class FileSystemCacheProvider : CacheProvider<FileSystemCacheConfiguration> {
|
|||||||
?.let(String::toInt)
|
?.let(String::toInt)
|
||||||
?: Deflater.DEFAULT_COMPRESSION
|
?: Deflater.DEFAULT_COMPRESSION
|
||||||
val digestAlgorithm = el.renderAttribute("digest") ?: "MD5"
|
val digestAlgorithm = el.renderAttribute("digest") ?: "MD5"
|
||||||
|
val chunkSize = el.renderAttribute("chunk-size")
|
||||||
|
?.let(Integer::decode)
|
||||||
|
?: 0x10000
|
||||||
|
|
||||||
return FileSystemCacheConfiguration(
|
return FileSystemCacheConfiguration(
|
||||||
path,
|
path,
|
||||||
maxAge,
|
maxAge,
|
||||||
digestAlgorithm,
|
digestAlgorithm,
|
||||||
enableCompression,
|
enableCompression,
|
||||||
compressionLevel
|
compressionLevel,
|
||||||
|
chunkSize
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,7 +50,9 @@ class FileSystemCacheProvider : CacheProvider<FileSystemCacheConfiguration> {
|
|||||||
Xml.of(doc, result) {
|
Xml.of(doc, result) {
|
||||||
val prefix = doc.lookupPrefix(RBCS.RBCS_NAMESPACE_URI)
|
val prefix = doc.lookupPrefix(RBCS.RBCS_NAMESPACE_URI)
|
||||||
attr("xs:type", "${prefix}:fileSystemCacheType", RBCS.XML_SCHEMA_NAMESPACE_URI)
|
attr("xs:type", "${prefix}:fileSystemCacheType", RBCS.XML_SCHEMA_NAMESPACE_URI)
|
||||||
attr("path", root.toString())
|
root?.let {
|
||||||
|
attr("path", it.toString())
|
||||||
|
}
|
||||||
attr("max-age", maxAge.toString())
|
attr("max-age", maxAge.toString())
|
||||||
digestAlgorithm?.let { digestAlgorithm ->
|
digestAlgorithm?.let { digestAlgorithm ->
|
||||||
attr("digest", digestAlgorithm)
|
attr("digest", digestAlgorithm)
|
||||||
@@ -57,6 +63,7 @@ class FileSystemCacheProvider : CacheProvider<FileSystemCacheConfiguration> {
|
|||||||
}?.let {
|
}?.let {
|
||||||
attr("compression-level", it.toString())
|
attr("compression-level", it.toString())
|
||||||
}
|
}
|
||||||
|
attr("chunk-size", chunkSize.toString())
|
||||||
}
|
}
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
@@ -1,89 +1,85 @@
|
|||||||
package net.woggioni.rbcs.server.cache
|
package net.woggioni.rbcs.server.cache
|
||||||
|
|
||||||
import io.netty.buffer.ByteBuf
|
import io.netty.buffer.ByteBuf
|
||||||
import net.woggioni.rbcs.api.Cache
|
import net.woggioni.rbcs.api.CacheValueMetadata
|
||||||
import net.woggioni.rbcs.common.ByteBufInputStream
|
import net.woggioni.rbcs.common.createLogger
|
||||||
import net.woggioni.rbcs.common.ByteBufOutputStream
|
|
||||||
import net.woggioni.rbcs.common.RBCS.digestString
|
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
|
||||||
import net.woggioni.jwo.JWO
|
|
||||||
import java.nio.channels.Channels
|
|
||||||
import java.security.MessageDigest
|
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
import java.time.Instant
|
import java.time.Instant
|
||||||
import java.util.concurrent.CompletableFuture
|
|
||||||
import java.util.concurrent.ConcurrentHashMap
|
import java.util.concurrent.ConcurrentHashMap
|
||||||
import java.util.concurrent.PriorityBlockingQueue
|
import java.util.concurrent.PriorityBlockingQueue
|
||||||
|
import java.util.concurrent.TimeUnit
|
||||||
import java.util.concurrent.atomic.AtomicLong
|
import java.util.concurrent.atomic.AtomicLong
|
||||||
import java.util.zip.Deflater
|
|
||||||
import java.util.zip.DeflaterOutputStream
|
private class CacheKey(private val value: ByteArray) {
|
||||||
import java.util.zip.Inflater
|
override fun equals(other: Any?) = if (other is CacheKey) {
|
||||||
import java.util.zip.InflaterInputStream
|
value.contentEquals(other.value)
|
||||||
|
} else false
|
||||||
|
|
||||||
|
override fun hashCode() = value.contentHashCode()
|
||||||
|
}
|
||||||
|
|
||||||
|
class CacheEntry(
|
||||||
|
val metadata: CacheValueMetadata,
|
||||||
|
val content: ByteBuf
|
||||||
|
)
|
||||||
|
|
||||||
class InMemoryCache(
|
class InMemoryCache(
|
||||||
val maxAge: Duration,
|
private val maxAge: Duration,
|
||||||
val maxSize: Long,
|
private val maxSize: Long
|
||||||
val digestAlgorithm: String?,
|
) : AutoCloseable {
|
||||||
val compressionEnabled: Boolean,
|
|
||||||
val compressionLevel: Int
|
|
||||||
) : Cache {
|
|
||||||
|
|
||||||
companion object {
|
companion object {
|
||||||
@JvmStatic
|
private val log = createLogger<InMemoryCache>()
|
||||||
private val log = contextLogger()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private val size = AtomicLong()
|
private val size = AtomicLong()
|
||||||
private val map = ConcurrentHashMap<String, ByteBuf>()
|
private val map = ConcurrentHashMap<CacheKey, CacheEntry>()
|
||||||
|
|
||||||
private class RemovalQueueElement(val key: String, val value : ByteBuf, val expiry : Instant) : Comparable<RemovalQueueElement> {
|
private class RemovalQueueElement(val key: CacheKey, val value: CacheEntry, val expiry: Instant) :
|
||||||
|
Comparable<RemovalQueueElement> {
|
||||||
override fun compareTo(other: RemovalQueueElement) = expiry.compareTo(other.expiry)
|
override fun compareTo(other: RemovalQueueElement) = expiry.compareTo(other.expiry)
|
||||||
}
|
}
|
||||||
|
|
||||||
private val removalQueue = PriorityBlockingQueue<RemovalQueueElement>()
|
private val removalQueue = PriorityBlockingQueue<RemovalQueueElement>()
|
||||||
|
|
||||||
|
@Volatile
|
||||||
private var running = true
|
private var running = true
|
||||||
private val garbageCollector = Thread {
|
|
||||||
while(true) {
|
private val garbageCollector = Thread.ofVirtual().name("in-memory-cache-gc").start {
|
||||||
val el = removalQueue.take()
|
while (running) {
|
||||||
val buf = el.value
|
val el = removalQueue.poll(1, TimeUnit.SECONDS) ?: continue
|
||||||
|
val value = el.value
|
||||||
val now = Instant.now()
|
val now = Instant.now()
|
||||||
if(now > el.expiry) {
|
if (now > el.expiry) {
|
||||||
val removed = map.remove(el.key, buf)
|
val removed = map.remove(el.key, value)
|
||||||
if(removed) {
|
if (removed) {
|
||||||
updateSizeAfterRemoval(buf)
|
updateSizeAfterRemoval(value.content)
|
||||||
//Decrease the reference count for map
|
//Decrease the reference count for map
|
||||||
buf.release()
|
value.content.release()
|
||||||
}
|
}
|
||||||
//Decrease the reference count for removalQueue
|
|
||||||
buf.release()
|
|
||||||
} else {
|
} else {
|
||||||
removalQueue.put(el)
|
removalQueue.put(el)
|
||||||
Thread.sleep(minOf(Duration.between(now, el.expiry), Duration.ofSeconds(1)))
|
Thread.sleep(minOf(Duration.between(now, el.expiry), Duration.ofSeconds(1)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}.apply {
|
|
||||||
start()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun removeEldest() : Long {
|
private fun removeEldest(): Long {
|
||||||
while(true) {
|
while (true) {
|
||||||
val el = removalQueue.take()
|
val el = removalQueue.take()
|
||||||
val buf = el.value
|
val value = el.value
|
||||||
val removed = map.remove(el.key, buf)
|
val removed = map.remove(el.key, value)
|
||||||
//Decrease the reference count for removalQueue
|
if (removed) {
|
||||||
buf.release()
|
val newSize = updateSizeAfterRemoval(value.content)
|
||||||
if(removed) {
|
|
||||||
val newSize = updateSizeAfterRemoval(buf)
|
|
||||||
//Decrease the reference count for map
|
//Decrease the reference count for map
|
||||||
buf.release()
|
value.content.release()
|
||||||
return newSize
|
return newSize
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun updateSizeAfterRemoval(removed: ByteBuf) : Long {
|
private fun updateSizeAfterRemoval(removed: ByteBuf): Long {
|
||||||
return size.updateAndGet { currentSize : Long ->
|
return size.updateAndGet { currentSize: Long ->
|
||||||
currentSize - removed.readableBytes()
|
currentSize - removed.readableBytes()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -93,58 +89,27 @@ class InMemoryCache(
|
|||||||
garbageCollector.join()
|
garbageCollector.join()
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun get(key: String) =
|
fun get(key: ByteArray) = map[CacheKey(key)]?.run {
|
||||||
(digestAlgorithm
|
CacheEntry(metadata, content.retainedDuplicate())
|
||||||
?.let(MessageDigest::getInstance)
|
}
|
||||||
?.let { md ->
|
|
||||||
digestString(key.toByteArray(), md)
|
|
||||||
} ?: key
|
|
||||||
).let { digest ->
|
|
||||||
map[digest]
|
|
||||||
?.let { value ->
|
|
||||||
val copy = value.retainedDuplicate()
|
|
||||||
copy.touch("This has to be released by the caller of the cache")
|
|
||||||
if (compressionEnabled) {
|
|
||||||
val inflater = Inflater()
|
|
||||||
Channels.newChannel(InflaterInputStream(ByteBufInputStream(copy), inflater))
|
|
||||||
} else {
|
|
||||||
Channels.newChannel(ByteBufInputStream(copy))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}.let {
|
|
||||||
CompletableFuture.completedFuture(it)
|
|
||||||
}
|
|
||||||
|
|
||||||
override fun put(key: String, content: ByteBuf) =
|
fun put(
|
||||||
(digestAlgorithm
|
key: ByteArray,
|
||||||
?.let(MessageDigest::getInstance)
|
value: CacheEntry,
|
||||||
?.let { md ->
|
) {
|
||||||
digestString(key.toByteArray(), md)
|
val cacheKey = CacheKey(key)
|
||||||
} ?: key).let { digest ->
|
val oldSize = map.put(cacheKey, value)?.let { old ->
|
||||||
content.retain()
|
val result = old.content.readableBytes()
|
||||||
val value = if (compressionEnabled) {
|
old.content.release()
|
||||||
val deflater = Deflater(compressionLevel)
|
result
|
||||||
val buf = content.alloc().buffer()
|
} ?: 0
|
||||||
buf.retain()
|
val delta = value.content.readableBytes() - oldSize
|
||||||
DeflaterOutputStream(ByteBufOutputStream(buf), deflater).use { outputStream ->
|
var newSize = size.updateAndGet { currentSize: Long ->
|
||||||
ByteBufInputStream(content).use { inputStream ->
|
currentSize + delta
|
||||||
JWO.copy(inputStream, outputStream)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf
|
|
||||||
} else {
|
|
||||||
content
|
|
||||||
}
|
|
||||||
val old = map.put(digest, value)
|
|
||||||
val delta = value.readableBytes() - (old?.readableBytes() ?: 0)
|
|
||||||
var newSize = size.updateAndGet { currentSize : Long ->
|
|
||||||
currentSize + delta
|
|
||||||
}
|
|
||||||
removalQueue.put(RemovalQueueElement(digest, value.retain(), Instant.now().plus(maxAge)))
|
|
||||||
while(newSize > maxSize) {
|
|
||||||
newSize = removeEldest()
|
|
||||||
}
|
|
||||||
}.let {
|
|
||||||
CompletableFuture.completedFuture<Void>(null)
|
|
||||||
}
|
}
|
||||||
|
removalQueue.put(RemovalQueueElement(cacheKey, value, Instant.now().plus(maxAge)))
|
||||||
|
while (newSize > maxSize) {
|
||||||
|
newSize = removeEldest()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
@@ -1,5 +1,6 @@
|
|||||||
package net.woggioni.rbcs.server.cache
|
package net.woggioni.rbcs.server.cache
|
||||||
|
|
||||||
|
import net.woggioni.rbcs.api.CacheHandlerFactory
|
||||||
import net.woggioni.rbcs.api.Configuration
|
import net.woggioni.rbcs.api.Configuration
|
||||||
import net.woggioni.rbcs.common.RBCS
|
import net.woggioni.rbcs.common.RBCS
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
@@ -10,14 +11,17 @@ data class InMemoryCacheConfiguration(
|
|||||||
val digestAlgorithm : String?,
|
val digestAlgorithm : String?,
|
||||||
val compressionEnabled: Boolean,
|
val compressionEnabled: Boolean,
|
||||||
val compressionLevel: Int,
|
val compressionLevel: Int,
|
||||||
|
val chunkSize : Int
|
||||||
) : Configuration.Cache {
|
) : Configuration.Cache {
|
||||||
override fun materialize() = InMemoryCache(
|
override fun materialize() = object : CacheHandlerFactory {
|
||||||
maxAge,
|
private val cache = InMemoryCache(maxAge, maxSize)
|
||||||
maxSize,
|
|
||||||
digestAlgorithm,
|
override fun close() {
|
||||||
compressionEnabled,
|
cache.close()
|
||||||
compressionLevel
|
}
|
||||||
)
|
|
||||||
|
override fun newHandler() = InMemoryCacheHandler(cache, digestAlgorithm, compressionEnabled, compressionLevel)
|
||||||
|
}
|
||||||
|
|
||||||
override fun getNamespaceURI() = RBCS.RBCS_NAMESPACE_URI
|
override fun getNamespaceURI() = RBCS.RBCS_NAMESPACE_URI
|
||||||
|
|
||||||
|
135
rbcs-server/src/main/kotlin/net/woggioni/rbcs/server/cache/InMemoryCacheHandler.kt
vendored
Normal file
135
rbcs-server/src/main/kotlin/net/woggioni/rbcs/server/cache/InMemoryCacheHandler.kt
vendored
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
package net.woggioni.rbcs.server.cache
|
||||||
|
|
||||||
|
import io.netty.buffer.ByteBuf
|
||||||
|
import io.netty.channel.ChannelHandlerContext
|
||||||
|
import io.netty.channel.SimpleChannelInboundHandler
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheContent
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheGetRequest
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutRequest
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueNotFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.LastCacheContent
|
||||||
|
import net.woggioni.rbcs.common.ByteBufOutputStream
|
||||||
|
import net.woggioni.rbcs.common.RBCS.processCacheKey
|
||||||
|
import java.util.zip.Deflater
|
||||||
|
import java.util.zip.DeflaterOutputStream
|
||||||
|
import java.util.zip.InflaterOutputStream
|
||||||
|
|
||||||
|
class InMemoryCacheHandler(
|
||||||
|
private val cache: InMemoryCache,
|
||||||
|
private val digestAlgorithm: String?,
|
||||||
|
private val compressionEnabled: Boolean,
|
||||||
|
private val compressionLevel: Int
|
||||||
|
) : SimpleChannelInboundHandler<CacheMessage>() {
|
||||||
|
|
||||||
|
private interface InProgressPutRequest : AutoCloseable {
|
||||||
|
val request: CachePutRequest
|
||||||
|
val buf: ByteBuf
|
||||||
|
|
||||||
|
fun append(buf: ByteBuf)
|
||||||
|
}
|
||||||
|
|
||||||
|
private inner class InProgressPlainPutRequest(ctx: ChannelHandlerContext, override val request: CachePutRequest) :
|
||||||
|
InProgressPutRequest {
|
||||||
|
override val buf = ctx.alloc().compositeBuffer()
|
||||||
|
|
||||||
|
private val stream = ByteBufOutputStream(buf).let {
|
||||||
|
if (compressionEnabled) {
|
||||||
|
DeflaterOutputStream(it, Deflater(compressionLevel))
|
||||||
|
} else {
|
||||||
|
it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun append(buf: ByteBuf) {
|
||||||
|
this.buf.addComponent(true, buf.retain())
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun close() {
|
||||||
|
buf.release()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private inner class InProgressCompressedPutRequest(
|
||||||
|
ctx: ChannelHandlerContext,
|
||||||
|
override val request: CachePutRequest
|
||||||
|
) : InProgressPutRequest {
|
||||||
|
|
||||||
|
override val buf = ctx.alloc().heapBuffer()
|
||||||
|
|
||||||
|
private val stream = ByteBufOutputStream(buf).let {
|
||||||
|
DeflaterOutputStream(it, Deflater(compressionLevel))
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun append(buf: ByteBuf) {
|
||||||
|
buf.readBytes(stream, buf.readableBytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun close() {
|
||||||
|
stream.close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private var inProgressPutRequest: InProgressPutRequest? = null
|
||||||
|
|
||||||
|
override fun channelRead0(ctx: ChannelHandlerContext, msg: CacheMessage) {
|
||||||
|
when (msg) {
|
||||||
|
is CacheGetRequest -> handleGetRequest(ctx, msg)
|
||||||
|
is CachePutRequest -> handlePutRequest(ctx, msg)
|
||||||
|
is LastCacheContent -> handleLastCacheContent(ctx, msg)
|
||||||
|
is CacheContent -> handleCacheContent(ctx, msg)
|
||||||
|
else -> ctx.fireChannelRead(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleGetRequest(ctx: ChannelHandlerContext, msg: CacheGetRequest) {
|
||||||
|
cache.get(processCacheKey(msg.key, digestAlgorithm))?.let { value ->
|
||||||
|
ctx.writeAndFlush(CacheValueFoundResponse(msg.key, value.metadata))
|
||||||
|
if (compressionEnabled) {
|
||||||
|
val buf = ctx.alloc().heapBuffer()
|
||||||
|
InflaterOutputStream(ByteBufOutputStream(buf)).use {
|
||||||
|
value.content.readBytes(it, value.content.readableBytes())
|
||||||
|
buf.retain()
|
||||||
|
}
|
||||||
|
ctx.writeAndFlush(LastCacheContent(buf))
|
||||||
|
} else {
|
||||||
|
ctx.writeAndFlush(LastCacheContent(value.content))
|
||||||
|
}
|
||||||
|
} ?: ctx.writeAndFlush(CacheValueNotFoundResponse())
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handlePutRequest(ctx: ChannelHandlerContext, msg: CachePutRequest) {
|
||||||
|
inProgressPutRequest = if(compressionEnabled) {
|
||||||
|
InProgressCompressedPutRequest(ctx, msg)
|
||||||
|
} else {
|
||||||
|
InProgressPlainPutRequest(ctx, msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleCacheContent(ctx: ChannelHandlerContext, msg: CacheContent) {
|
||||||
|
inProgressPutRequest?.append(msg.content())
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun handleLastCacheContent(ctx: ChannelHandlerContext, msg: LastCacheContent) {
|
||||||
|
handleCacheContent(ctx, msg)
|
||||||
|
inProgressPutRequest?.let { inProgressRequest ->
|
||||||
|
inProgressPutRequest = null
|
||||||
|
val buf = inProgressRequest.buf
|
||||||
|
buf.retain()
|
||||||
|
inProgressRequest.close()
|
||||||
|
val cacheKey = processCacheKey(inProgressRequest.request.key, digestAlgorithm)
|
||||||
|
cache.put(cacheKey, CacheEntry(inProgressRequest.request.metadata, buf))
|
||||||
|
ctx.writeAndFlush(CachePutResponse(inProgressRequest.request.key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
||||||
|
inProgressPutRequest?.let { req ->
|
||||||
|
req.buf.release()
|
||||||
|
inProgressPutRequest = null
|
||||||
|
}
|
||||||
|
super.exceptionCaught(ctx, cause)
|
||||||
|
}
|
||||||
|
}
|
@@ -31,13 +31,16 @@ class InMemoryCacheProvider : CacheProvider<InMemoryCacheConfiguration> {
|
|||||||
?.let(String::toInt)
|
?.let(String::toInt)
|
||||||
?: Deflater.DEFAULT_COMPRESSION
|
?: Deflater.DEFAULT_COMPRESSION
|
||||||
val digestAlgorithm = el.renderAttribute("digest") ?: "MD5"
|
val digestAlgorithm = el.renderAttribute("digest") ?: "MD5"
|
||||||
|
val chunkSize = el.renderAttribute("chunk-size")
|
||||||
|
?.let(Integer::decode)
|
||||||
|
?: 0x10000
|
||||||
return InMemoryCacheConfiguration(
|
return InMemoryCacheConfiguration(
|
||||||
maxAge,
|
maxAge,
|
||||||
maxSize,
|
maxSize,
|
||||||
digestAlgorithm,
|
digestAlgorithm,
|
||||||
enableCompression,
|
enableCompression,
|
||||||
compressionLevel
|
compressionLevel,
|
||||||
|
chunkSize
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,6 +60,7 @@ class InMemoryCacheProvider : CacheProvider<InMemoryCacheConfiguration> {
|
|||||||
}?.let {
|
}?.let {
|
||||||
attr("compression-level", it.toString())
|
attr("compression-level", it.toString())
|
||||||
}
|
}
|
||||||
|
attr("chunk-size", chunkSize.toString())
|
||||||
}
|
}
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
@@ -124,7 +124,7 @@ object Parser {
|
|||||||
val writeIdleTimeout = child.renderAttribute("write-idle-timeout")
|
val writeIdleTimeout = child.renderAttribute("write-idle-timeout")
|
||||||
?.let(Duration::parse) ?: Duration.of(60, ChronoUnit.SECONDS)
|
?.let(Duration::parse) ?: Duration.of(60, ChronoUnit.SECONDS)
|
||||||
val maxRequestSize = child.renderAttribute("max-request-size")
|
val maxRequestSize = child.renderAttribute("max-request-size")
|
||||||
?.let(String::toInt) ?: 67108864
|
?.let(Integer::decode) ?: 0x4000000
|
||||||
connection = Configuration.Connection(
|
connection = Configuration.Connection(
|
||||||
readTimeout,
|
readTimeout,
|
||||||
writeTimeout,
|
writeTimeout,
|
||||||
|
@@ -3,7 +3,7 @@ package net.woggioni.rbcs.server.exception
|
|||||||
import io.netty.buffer.Unpooled
|
import io.netty.buffer.Unpooled
|
||||||
import io.netty.channel.ChannelDuplexHandler
|
import io.netty.channel.ChannelDuplexHandler
|
||||||
import io.netty.channel.ChannelFutureListener
|
import io.netty.channel.ChannelFutureListener
|
||||||
import io.netty.channel.ChannelHandler
|
import io.netty.channel.ChannelHandler.Sharable
|
||||||
import io.netty.channel.ChannelHandlerContext
|
import io.netty.channel.ChannelHandlerContext
|
||||||
import io.netty.handler.codec.DecoderException
|
import io.netty.handler.codec.DecoderException
|
||||||
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
||||||
@@ -17,10 +17,16 @@ import net.woggioni.rbcs.api.exception.CacheException
|
|||||||
import net.woggioni.rbcs.api.exception.ContentTooLargeException
|
import net.woggioni.rbcs.api.exception.ContentTooLargeException
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.common.contextLogger
|
||||||
import net.woggioni.rbcs.common.debug
|
import net.woggioni.rbcs.common.debug
|
||||||
|
import net.woggioni.rbcs.common.log
|
||||||
|
import org.slf4j.event.Level
|
||||||
|
import org.slf4j.spi.LoggingEventBuilder
|
||||||
|
import java.net.ConnectException
|
||||||
|
import java.net.SocketException
|
||||||
|
import javax.net.ssl.SSLException
|
||||||
import javax.net.ssl.SSLPeerUnverifiedException
|
import javax.net.ssl.SSLPeerUnverifiedException
|
||||||
|
|
||||||
@ChannelHandler.Sharable
|
@Sharable
|
||||||
class ExceptionHandler : ChannelDuplexHandler() {
|
object ExceptionHandler : ChannelDuplexHandler() {
|
||||||
private val log = contextLogger()
|
private val log = contextLogger()
|
||||||
|
|
||||||
private val NOT_AUTHORIZED: FullHttpResponse = DefaultFullHttpResponse(
|
private val NOT_AUTHORIZED: FullHttpResponse = DefaultFullHttpResponse(
|
||||||
@@ -29,12 +35,6 @@ class ExceptionHandler : ChannelDuplexHandler() {
|
|||||||
headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
||||||
}
|
}
|
||||||
|
|
||||||
private val TOO_BIG: FullHttpResponse = DefaultFullHttpResponse(
|
|
||||||
HttpVersion.HTTP_1_1, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, Unpooled.EMPTY_BUFFER
|
|
||||||
).apply {
|
|
||||||
headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
|
||||||
}
|
|
||||||
|
|
||||||
private val NOT_AVAILABLE: FullHttpResponse = DefaultFullHttpResponse(
|
private val NOT_AVAILABLE: FullHttpResponse = DefaultFullHttpResponse(
|
||||||
HttpVersion.HTTP_1_1, HttpResponseStatus.SERVICE_UNAVAILABLE, Unpooled.EMPTY_BUFFER
|
HttpVersion.HTTP_1_1, HttpResponseStatus.SERVICE_UNAVAILABLE, Unpooled.EMPTY_BUFFER
|
||||||
).apply {
|
).apply {
|
||||||
@@ -47,10 +47,26 @@ class ExceptionHandler : ChannelDuplexHandler() {
|
|||||||
headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private val TOO_BIG: FullHttpResponse = DefaultFullHttpResponse(
|
||||||
|
HttpVersion.HTTP_1_1, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, Unpooled.EMPTY_BUFFER
|
||||||
|
).apply {
|
||||||
|
headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
||||||
|
}
|
||||||
|
|
||||||
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
||||||
when (cause) {
|
when (cause) {
|
||||||
is DecoderException -> {
|
is DecoderException -> {
|
||||||
|
log.debug(cause.message, cause)
|
||||||
|
ctx.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
is ConnectException -> {
|
||||||
log.error(cause.message, cause)
|
log.error(cause.message, cause)
|
||||||
|
ctx.writeAndFlush(SERVER_ERROR.retainedDuplicate())
|
||||||
|
}
|
||||||
|
|
||||||
|
is SocketException -> {
|
||||||
|
log.debug(cause.message, cause)
|
||||||
ctx.close()
|
ctx.close()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -59,10 +75,19 @@ class ExceptionHandler : ChannelDuplexHandler() {
|
|||||||
.addListener(ChannelFutureListener.CLOSE_ON_FAILURE)
|
.addListener(ChannelFutureListener.CLOSE_ON_FAILURE)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
is SSLException -> {
|
||||||
|
log.debug(cause.message, cause)
|
||||||
|
ctx.close()
|
||||||
|
}
|
||||||
|
|
||||||
is ContentTooLargeException -> {
|
is ContentTooLargeException -> {
|
||||||
|
log.log(Level.DEBUG, ctx.channel()) { builder : LoggingEventBuilder ->
|
||||||
|
builder.setMessage("Request body is too large")
|
||||||
|
}
|
||||||
ctx.writeAndFlush(TOO_BIG.retainedDuplicate())
|
ctx.writeAndFlush(TOO_BIG.retainedDuplicate())
|
||||||
.addListener(ChannelFutureListener.CLOSE_ON_FAILURE)
|
.addListener(ChannelFutureListener.CLOSE_ON_FAILURE)
|
||||||
}
|
}
|
||||||
|
|
||||||
is ReadTimeoutException -> {
|
is ReadTimeoutException -> {
|
||||||
log.debug {
|
log.debug {
|
||||||
val channelId = ctx.channel().id().asShortText()
|
val channelId = ctx.channel().id().asShortText()
|
||||||
@@ -70,6 +95,7 @@ class ExceptionHandler : ChannelDuplexHandler() {
|
|||||||
}
|
}
|
||||||
ctx.close()
|
ctx.close()
|
||||||
}
|
}
|
||||||
|
|
||||||
is WriteTimeoutException -> {
|
is WriteTimeoutException -> {
|
||||||
log.debug {
|
log.debug {
|
||||||
val channelId = ctx.channel().id().asShortText()
|
val channelId = ctx.channel().id().asShortText()
|
||||||
@@ -77,11 +103,13 @@ class ExceptionHandler : ChannelDuplexHandler() {
|
|||||||
}
|
}
|
||||||
ctx.close()
|
ctx.close()
|
||||||
}
|
}
|
||||||
|
|
||||||
is CacheException -> {
|
is CacheException -> {
|
||||||
log.error(cause.message, cause)
|
log.error(cause.message, cause)
|
||||||
ctx.writeAndFlush(NOT_AVAILABLE.retainedDuplicate())
|
ctx.writeAndFlush(NOT_AVAILABLE.retainedDuplicate())
|
||||||
.addListener(ChannelFutureListener.CLOSE_ON_FAILURE)
|
.addListener(ChannelFutureListener.CLOSE_ON_FAILURE)
|
||||||
}
|
}
|
||||||
|
|
||||||
else -> {
|
else -> {
|
||||||
log.error(cause.message, cause)
|
log.error(cause.message, cause)
|
||||||
ctx.writeAndFlush(SERVER_ERROR.retainedDuplicate())
|
ctx.writeAndFlush(SERVER_ERROR.retainedDuplicate())
|
||||||
|
@@ -0,0 +1,28 @@
|
|||||||
|
package net.woggioni.rbcs.server.handler
|
||||||
|
|
||||||
|
import io.netty.channel.ChannelHandler.Sharable
|
||||||
|
import io.netty.channel.ChannelHandlerContext
|
||||||
|
import io.netty.channel.SimpleChannelInboundHandler
|
||||||
|
import io.netty.handler.codec.http.HttpContent
|
||||||
|
import io.netty.handler.codec.http.LastHttpContent
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheContent
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.LastCacheContent
|
||||||
|
|
||||||
|
@Sharable
|
||||||
|
object CacheContentHandler : SimpleChannelInboundHandler<HttpContent>() {
|
||||||
|
val NAME = this::class.java.name
|
||||||
|
|
||||||
|
override fun channelRead0(ctx: ChannelHandlerContext, msg: HttpContent) {
|
||||||
|
when(msg) {
|
||||||
|
is LastHttpContent -> {
|
||||||
|
ctx.fireChannelRead(LastCacheContent(msg.content().retain()))
|
||||||
|
ctx.pipeline().remove(this)
|
||||||
|
}
|
||||||
|
else -> ctx.fireChannelRead(CacheContent(msg.content().retain()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ctx: ChannelHandlerContext?, cause: Throwable?) {
|
||||||
|
super.exceptionCaught(ctx, cause)
|
||||||
|
}
|
||||||
|
}
|
@@ -0,0 +1,40 @@
|
|||||||
|
package net.woggioni.rbcs.server.handler
|
||||||
|
|
||||||
|
import io.netty.channel.ChannelHandlerContext
|
||||||
|
import io.netty.channel.ChannelInboundHandlerAdapter
|
||||||
|
import io.netty.handler.codec.http.HttpContent
|
||||||
|
import io.netty.handler.codec.http.HttpRequest
|
||||||
|
import net.woggioni.rbcs.api.exception.ContentTooLargeException
|
||||||
|
|
||||||
|
|
||||||
|
class MaxRequestSizeHandler(private val maxRequestSize : Int) : ChannelInboundHandlerAdapter() {
|
||||||
|
companion object {
|
||||||
|
val NAME = MaxRequestSizeHandler::class.java.name
|
||||||
|
}
|
||||||
|
|
||||||
|
private var cumulativeSize = 0
|
||||||
|
|
||||||
|
override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
|
||||||
|
when(msg) {
|
||||||
|
is HttpRequest -> {
|
||||||
|
cumulativeSize = 0
|
||||||
|
ctx.fireChannelRead(msg)
|
||||||
|
}
|
||||||
|
is HttpContent -> {
|
||||||
|
val exceeded = cumulativeSize > maxRequestSize
|
||||||
|
if(!exceeded) {
|
||||||
|
cumulativeSize += msg.content().readableBytes()
|
||||||
|
}
|
||||||
|
if(cumulativeSize > maxRequestSize) {
|
||||||
|
msg.release()
|
||||||
|
if(!exceeded) {
|
||||||
|
ctx.fireExceptionCaught(ContentTooLargeException("Request body is too large", null))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ctx.fireChannelRead(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else -> ctx.fireChannelRead(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -1,95 +1,148 @@
|
|||||||
package net.woggioni.rbcs.server.handler
|
package net.woggioni.rbcs.server.handler
|
||||||
|
|
||||||
import io.netty.buffer.Unpooled
|
import io.netty.channel.ChannelDuplexHandler
|
||||||
import io.netty.channel.ChannelFutureListener
|
|
||||||
import io.netty.channel.ChannelHandler
|
|
||||||
import io.netty.channel.ChannelHandlerContext
|
import io.netty.channel.ChannelHandlerContext
|
||||||
import io.netty.channel.DefaultFileRegion
|
import io.netty.channel.ChannelPromise
|
||||||
import io.netty.channel.SimpleChannelInboundHandler
|
|
||||||
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
||||||
|
import io.netty.handler.codec.http.DefaultHttpContent
|
||||||
import io.netty.handler.codec.http.DefaultHttpResponse
|
import io.netty.handler.codec.http.DefaultHttpResponse
|
||||||
import io.netty.handler.codec.http.FullHttpRequest
|
import io.netty.handler.codec.http.DefaultLastHttpContent
|
||||||
import io.netty.handler.codec.http.HttpHeaderNames
|
import io.netty.handler.codec.http.HttpHeaderNames
|
||||||
import io.netty.handler.codec.http.HttpHeaderValues
|
import io.netty.handler.codec.http.HttpHeaderValues
|
||||||
|
import io.netty.handler.codec.http.HttpHeaders
|
||||||
import io.netty.handler.codec.http.HttpMethod
|
import io.netty.handler.codec.http.HttpMethod
|
||||||
|
import io.netty.handler.codec.http.HttpRequest
|
||||||
import io.netty.handler.codec.http.HttpResponseStatus
|
import io.netty.handler.codec.http.HttpResponseStatus
|
||||||
import io.netty.handler.codec.http.HttpUtil
|
import io.netty.handler.codec.http.HttpUtil
|
||||||
import io.netty.handler.codec.http.LastHttpContent
|
import io.netty.handler.codec.http.HttpVersion
|
||||||
import io.netty.handler.stream.ChunkedNioStream
|
import net.woggioni.rbcs.api.CacheValueMetadata
|
||||||
import net.woggioni.rbcs.api.Cache
|
import net.woggioni.rbcs.api.message.CacheMessage
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheContent
|
||||||
import net.woggioni.rbcs.server.debug
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheGetRequest
|
||||||
import net.woggioni.rbcs.server.warn
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutRequest
|
||||||
import java.nio.channels.FileChannel
|
import net.woggioni.rbcs.api.message.CacheMessage.CachePutResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.CacheValueNotFoundResponse
|
||||||
|
import net.woggioni.rbcs.api.message.CacheMessage.LastCacheContent
|
||||||
|
import net.woggioni.rbcs.common.createLogger
|
||||||
|
import net.woggioni.rbcs.common.debug
|
||||||
|
import net.woggioni.rbcs.common.warn
|
||||||
import java.nio.file.Path
|
import java.nio.file.Path
|
||||||
|
import java.util.Locale
|
||||||
|
|
||||||
@ChannelHandler.Sharable
|
class ServerHandler(private val serverPrefix: Path) :
|
||||||
class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
|
ChannelDuplexHandler() {
|
||||||
SimpleChannelInboundHandler<FullHttpRequest>() {
|
|
||||||
|
|
||||||
private val log = contextLogger()
|
companion object {
|
||||||
|
private val log = createLogger<ServerHandler>()
|
||||||
|
val NAME = this::class.java.name
|
||||||
|
}
|
||||||
|
|
||||||
override fun channelRead0(ctx: ChannelHandlerContext, msg: FullHttpRequest) {
|
private var httpVersion = HttpVersion.HTTP_1_1
|
||||||
val keepAlive: Boolean = HttpUtil.isKeepAlive(msg)
|
private var keepAlive = true
|
||||||
|
|
||||||
|
private fun resetRequestMetadata() {
|
||||||
|
httpVersion = HttpVersion.HTTP_1_1
|
||||||
|
keepAlive = true
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun setRequestMetadata(req: HttpRequest) {
|
||||||
|
httpVersion = req.protocolVersion()
|
||||||
|
keepAlive = HttpUtil.isKeepAlive(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun setKeepAliveHeader(headers: HttpHeaders) {
|
||||||
|
if (!keepAlive) {
|
||||||
|
headers.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE)
|
||||||
|
} else {
|
||||||
|
headers.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
|
||||||
|
when (msg) {
|
||||||
|
is HttpRequest -> handleRequest(ctx, msg)
|
||||||
|
else -> super.channelRead(ctx, msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
override fun write(ctx: ChannelHandlerContext, msg: Any, promise: ChannelPromise?) {
|
||||||
|
if (msg is CacheMessage) {
|
||||||
|
try {
|
||||||
|
when (msg) {
|
||||||
|
is CachePutResponse -> {
|
||||||
|
val response = DefaultFullHttpResponse(httpVersion, HttpResponseStatus.CREATED)
|
||||||
|
val keyBytes = msg.key.toByteArray(Charsets.UTF_8)
|
||||||
|
response.headers().apply {
|
||||||
|
set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.TEXT_PLAIN)
|
||||||
|
set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED)
|
||||||
|
}
|
||||||
|
setKeepAliveHeader(response.headers())
|
||||||
|
ctx.write(response)
|
||||||
|
val buf = ctx.alloc().buffer(keyBytes.size).apply {
|
||||||
|
writeBytes(keyBytes)
|
||||||
|
}
|
||||||
|
ctx.writeAndFlush(DefaultLastHttpContent(buf))
|
||||||
|
}
|
||||||
|
|
||||||
|
is CacheValueNotFoundResponse -> {
|
||||||
|
val response = DefaultFullHttpResponse(httpVersion, HttpResponseStatus.NOT_FOUND)
|
||||||
|
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = 0
|
||||||
|
setKeepAliveHeader(response.headers())
|
||||||
|
ctx.writeAndFlush(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
is CacheValueFoundResponse -> {
|
||||||
|
val response = DefaultHttpResponse(httpVersion, HttpResponseStatus.OK)
|
||||||
|
response.headers().apply {
|
||||||
|
set(HttpHeaderNames.CONTENT_TYPE, msg.metadata.mimeType ?: HttpHeaderValues.APPLICATION_OCTET_STREAM)
|
||||||
|
msg.metadata.contentDisposition?.let { contentDisposition ->
|
||||||
|
set(HttpHeaderNames.CONTENT_DISPOSITION, contentDisposition)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
setKeepAliveHeader(response.headers())
|
||||||
|
response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED)
|
||||||
|
ctx.writeAndFlush(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
is LastCacheContent -> {
|
||||||
|
ctx.writeAndFlush(DefaultLastHttpContent(msg.content()))
|
||||||
|
}
|
||||||
|
|
||||||
|
is CacheContent -> {
|
||||||
|
ctx.writeAndFlush(DefaultHttpContent(msg.content()))
|
||||||
|
}
|
||||||
|
|
||||||
|
else -> throw UnsupportedOperationException("This should never happen")
|
||||||
|
}.let { channelFuture ->
|
||||||
|
if (promise != null) {
|
||||||
|
channelFuture.addListener {
|
||||||
|
if (it.isSuccess) promise.setSuccess()
|
||||||
|
else promise.setFailure(it.cause())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
resetRequestMetadata()
|
||||||
|
}
|
||||||
|
} else super.write(ctx, msg, promise)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
private fun handleRequest(ctx: ChannelHandlerContext, msg: HttpRequest) {
|
||||||
|
setRequestMetadata(msg)
|
||||||
val method = msg.method()
|
val method = msg.method()
|
||||||
if (method === HttpMethod.GET) {
|
if (method === HttpMethod.GET) {
|
||||||
val path = Path.of(msg.uri())
|
val path = Path.of(msg.uri())
|
||||||
val prefix = path.parent
|
val prefix = path.parent
|
||||||
val key = path.fileName?.toString() ?: let {
|
|
||||||
val response = DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.NOT_FOUND)
|
|
||||||
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = 0
|
|
||||||
ctx.writeAndFlush(response)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if (serverPrefix == prefix) {
|
if (serverPrefix == prefix) {
|
||||||
cache.get(key).thenApply { channel ->
|
ctx.pipeline().addAfter(NAME, CacheContentHandler.NAME, CacheContentHandler)
|
||||||
if(channel != null) {
|
path.fileName?.toString()
|
||||||
log.debug(ctx) {
|
?.let(::CacheGetRequest)
|
||||||
"Cache hit for key '$key'"
|
?.let(ctx::fireChannelRead)
|
||||||
}
|
?: ctx.channel().write(CacheValueNotFoundResponse())
|
||||||
val response = DefaultHttpResponse(msg.protocolVersion(), HttpResponseStatus.OK)
|
|
||||||
response.headers()[HttpHeaderNames.CONTENT_TYPE] = HttpHeaderValues.APPLICATION_OCTET_STREAM
|
|
||||||
if (!keepAlive) {
|
|
||||||
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE)
|
|
||||||
response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.IDENTITY)
|
|
||||||
} else {
|
|
||||||
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE)
|
|
||||||
response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED)
|
|
||||||
}
|
|
||||||
ctx.write(response)
|
|
||||||
when (channel) {
|
|
||||||
is FileChannel -> {
|
|
||||||
val content = DefaultFileRegion(channel, 0, channel.size())
|
|
||||||
if (keepAlive) {
|
|
||||||
ctx.write(content)
|
|
||||||
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT.retainedDuplicate())
|
|
||||||
} else {
|
|
||||||
ctx.writeAndFlush(content)
|
|
||||||
.addListener(ChannelFutureListener.CLOSE)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else -> {
|
|
||||||
val content = ChunkedNioStream(channel)
|
|
||||||
if (keepAlive) {
|
|
||||||
ctx.write(content).addListener {
|
|
||||||
content.close()
|
|
||||||
}
|
|
||||||
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT.retainedDuplicate())
|
|
||||||
} else {
|
|
||||||
ctx.writeAndFlush(content)
|
|
||||||
.addListener(ChannelFutureListener.CLOSE)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
log.debug(ctx) {
|
|
||||||
"Cache miss for key '$key'"
|
|
||||||
}
|
|
||||||
val response = DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.NOT_FOUND)
|
|
||||||
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = 0
|
|
||||||
ctx.writeAndFlush(response)
|
|
||||||
}
|
|
||||||
}.whenComplete { _, ex -> ex?.let(ctx::fireExceptionCaught) }
|
|
||||||
} else {
|
} else {
|
||||||
log.warn(ctx) {
|
log.warn(ctx) {
|
||||||
"Got request for unhandled path '${msg.uri()}'"
|
"Got request for unhandled path '${msg.uri()}'"
|
||||||
@@ -107,16 +160,14 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
|
|||||||
log.debug(ctx) {
|
log.debug(ctx) {
|
||||||
"Added value for key '$key' to build cache"
|
"Added value for key '$key' to build cache"
|
||||||
}
|
}
|
||||||
cache.put(key, msg.content()).thenRun {
|
ctx.pipeline().addAfter(NAME, CacheContentHandler.NAME, CacheContentHandler)
|
||||||
val response = DefaultFullHttpResponse(
|
path.fileName?.toString()
|
||||||
msg.protocolVersion(), HttpResponseStatus.CREATED,
|
?.let {
|
||||||
Unpooled.copiedBuffer(key.toByteArray())
|
val mimeType = HttpUtil.getMimeType(msg)?.toString()
|
||||||
)
|
CachePutRequest(key, CacheValueMetadata(msg.headers().get(HttpHeaderNames.CONTENT_DISPOSITION), mimeType))
|
||||||
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = response.content().readableBytes()
|
}
|
||||||
ctx.writeAndFlush(response)
|
?.let(ctx::fireChannelRead)
|
||||||
}.whenComplete { _, ex ->
|
?: ctx.channel().write(CacheValueNotFoundResponse())
|
||||||
ctx.fireExceptionCaught(ex)
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
log.warn(ctx) {
|
log.warn(ctx) {
|
||||||
"Got request for unhandled path '${msg.uri()}'"
|
"Got request for unhandled path '${msg.uri()}'"
|
||||||
@@ -125,30 +176,8 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
|
|||||||
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
|
||||||
ctx.writeAndFlush(response)
|
ctx.writeAndFlush(response)
|
||||||
}
|
}
|
||||||
} else if(method == HttpMethod.TRACE) {
|
} else if (method == HttpMethod.TRACE) {
|
||||||
val replayedRequestHead = ctx.alloc().buffer()
|
super.channelRead(ctx, msg)
|
||||||
replayedRequestHead.writeCharSequence("TRACE ${Path.of(msg.uri())} ${msg.protocolVersion().text()}\r\n", Charsets.US_ASCII)
|
|
||||||
msg.headers().forEach { (key, value) ->
|
|
||||||
replayedRequestHead.apply {
|
|
||||||
writeCharSequence(key, Charsets.US_ASCII)
|
|
||||||
writeCharSequence(": ", Charsets.US_ASCII)
|
|
||||||
writeCharSequence(value, Charsets.UTF_8)
|
|
||||||
writeCharSequence("\r\n", Charsets.US_ASCII)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
replayedRequestHead.writeCharSequence("\r\n", Charsets.US_ASCII)
|
|
||||||
val requestBody = msg.content()
|
|
||||||
requestBody.retain()
|
|
||||||
val responseBody = ctx.alloc().compositeBuffer(2).apply {
|
|
||||||
addComponents(true, replayedRequestHead)
|
|
||||||
addComponents(true, requestBody)
|
|
||||||
}
|
|
||||||
val response = DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.OK, responseBody)
|
|
||||||
response.headers().apply {
|
|
||||||
set(HttpHeaderNames.CONTENT_TYPE, "message/http")
|
|
||||||
set(HttpHeaderNames.CONTENT_LENGTH, responseBody.readableBytes())
|
|
||||||
}
|
|
||||||
ctx.writeAndFlush(response)
|
|
||||||
} else {
|
} else {
|
||||||
log.warn(ctx) {
|
log.warn(ctx) {
|
||||||
"Got request with unhandled method '${msg.method().name()}'"
|
"Got request with unhandled method '${msg.method().name()}'"
|
||||||
@@ -158,4 +187,44 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
|
|||||||
ctx.writeAndFlush(response)
|
ctx.writeAndFlush(response)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
data class ContentDisposition(val type: Type?, val fileName: String?) {
|
||||||
|
enum class Type {
|
||||||
|
attachment, `inline`;
|
||||||
|
|
||||||
|
companion object {
|
||||||
|
@JvmStatic
|
||||||
|
fun parse(maybeString: String?) = maybeString.let { s ->
|
||||||
|
try {
|
||||||
|
java.lang.Enum.valueOf(Type::class.java, s)
|
||||||
|
} catch (ex: IllegalArgumentException) {
|
||||||
|
null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
companion object {
|
||||||
|
@JvmStatic
|
||||||
|
fun parse(contentDisposition: String) : ContentDisposition {
|
||||||
|
val parts = contentDisposition.split(";").dropLastWhile { it.isEmpty() }.toTypedArray()
|
||||||
|
val dispositionType = parts[0].trim { it <= ' ' }.let(Type::parse) // Get the type (e.g., attachment)
|
||||||
|
|
||||||
|
var filename: String? = null
|
||||||
|
for (i in 1..<parts.size) {
|
||||||
|
val part = parts[i].trim { it <= ' ' }
|
||||||
|
if (part.lowercase(Locale.getDefault()).startsWith("filename=")) {
|
||||||
|
filename = part.substring("filename=".length).trim { it <= ' ' }.replace("\"", "")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ContentDisposition(dispositionType, filename)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
|
||||||
|
super.exceptionCaught(ctx, cause)
|
||||||
|
}
|
||||||
}
|
}
|
@@ -0,0 +1,54 @@
|
|||||||
|
package net.woggioni.rbcs.server.handler
|
||||||
|
|
||||||
|
import io.netty.channel.ChannelHandler.Sharable
|
||||||
|
import io.netty.channel.ChannelHandlerContext
|
||||||
|
import io.netty.channel.ChannelInboundHandlerAdapter
|
||||||
|
import io.netty.handler.codec.http.DefaultHttpResponse
|
||||||
|
import io.netty.handler.codec.http.HttpContent
|
||||||
|
import io.netty.handler.codec.http.HttpHeaderNames
|
||||||
|
import io.netty.handler.codec.http.HttpHeaderValues
|
||||||
|
import io.netty.handler.codec.http.HttpRequest
|
||||||
|
import io.netty.handler.codec.http.HttpResponseStatus
|
||||||
|
import io.netty.handler.codec.http.LastHttpContent
|
||||||
|
import java.nio.file.Path
|
||||||
|
|
||||||
|
@Sharable
|
||||||
|
object TraceHandler : ChannelInboundHandlerAdapter() {
|
||||||
|
val NAME = this::class.java.name
|
||||||
|
override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
|
||||||
|
when(msg) {
|
||||||
|
is HttpRequest -> {
|
||||||
|
val response = DefaultHttpResponse(msg.protocolVersion(), HttpResponseStatus.OK)
|
||||||
|
response.headers().apply {
|
||||||
|
set(HttpHeaderNames.CONTENT_TYPE, "message/http")
|
||||||
|
set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED)
|
||||||
|
}
|
||||||
|
ctx.write(response)
|
||||||
|
val replayedRequestHead = ctx.alloc().buffer()
|
||||||
|
replayedRequestHead.writeCharSequence(
|
||||||
|
"TRACE ${Path.of(msg.uri())} ${msg.protocolVersion().text()}\r\n",
|
||||||
|
Charsets.US_ASCII
|
||||||
|
)
|
||||||
|
msg.headers().forEach { (key, value) ->
|
||||||
|
replayedRequestHead.apply {
|
||||||
|
writeCharSequence(key, Charsets.US_ASCII)
|
||||||
|
writeCharSequence(": ", Charsets.US_ASCII)
|
||||||
|
writeCharSequence(value, Charsets.UTF_8)
|
||||||
|
writeCharSequence("\r\n", Charsets.US_ASCII)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
replayedRequestHead.writeCharSequence("\r\n", Charsets.US_ASCII)
|
||||||
|
ctx.writeAndFlush(replayedRequestHead)
|
||||||
|
}
|
||||||
|
is LastHttpContent -> {
|
||||||
|
ctx.writeAndFlush(msg)
|
||||||
|
}
|
||||||
|
is HttpContent -> ctx.writeAndFlush(msg)
|
||||||
|
else -> super.channelRead(ctx, msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun exceptionCaught(ctx: ChannelHandlerContext?, cause: Throwable?) {
|
||||||
|
super.exceptionCaught(ctx, cause)
|
||||||
|
}
|
||||||
|
}
|
@@ -1,7 +1,7 @@
|
|||||||
package net.woggioni.rbcs.server.throttling
|
package net.woggioni.rbcs.server.throttling
|
||||||
|
|
||||||
import net.woggioni.rbcs.api.Configuration
|
|
||||||
import net.woggioni.jwo.Bucket
|
import net.woggioni.jwo.Bucket
|
||||||
|
import net.woggioni.rbcs.api.Configuration
|
||||||
import java.net.InetSocketAddress
|
import java.net.InetSocketAddress
|
||||||
import java.util.Arrays
|
import java.util.Arrays
|
||||||
import java.util.concurrent.ConcurrentHashMap
|
import java.util.concurrent.ConcurrentHashMap
|
||||||
|
@@ -1,31 +1,32 @@
|
|||||||
package net.woggioni.rbcs.server.throttling
|
package net.woggioni.rbcs.server.throttling
|
||||||
|
|
||||||
import io.netty.channel.ChannelHandler.Sharable
|
|
||||||
import io.netty.channel.ChannelHandlerContext
|
import io.netty.channel.ChannelHandlerContext
|
||||||
import io.netty.channel.ChannelInboundHandlerAdapter
|
import io.netty.channel.ChannelInboundHandlerAdapter
|
||||||
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
import io.netty.handler.codec.http.DefaultFullHttpResponse
|
||||||
|
import io.netty.handler.codec.http.HttpContent
|
||||||
import io.netty.handler.codec.http.HttpHeaderNames
|
import io.netty.handler.codec.http.HttpHeaderNames
|
||||||
|
import io.netty.handler.codec.http.HttpRequest
|
||||||
import io.netty.handler.codec.http.HttpResponseStatus
|
import io.netty.handler.codec.http.HttpResponseStatus
|
||||||
import io.netty.handler.codec.http.HttpVersion
|
import io.netty.handler.codec.http.HttpVersion
|
||||||
import net.woggioni.rbcs.api.Configuration
|
|
||||||
import net.woggioni.rbcs.common.contextLogger
|
|
||||||
import net.woggioni.rbcs.server.RemoteBuildCacheServer
|
|
||||||
import net.woggioni.jwo.Bucket
|
import net.woggioni.jwo.Bucket
|
||||||
import net.woggioni.jwo.LongMath
|
import net.woggioni.jwo.LongMath
|
||||||
|
import net.woggioni.rbcs.api.Configuration
|
||||||
|
import net.woggioni.rbcs.common.createLogger
|
||||||
|
import net.woggioni.rbcs.server.RemoteBuildCacheServer
|
||||||
import java.net.InetSocketAddress
|
import java.net.InetSocketAddress
|
||||||
import java.time.Duration
|
import java.time.Duration
|
||||||
import java.time.temporal.ChronoUnit
|
import java.time.temporal.ChronoUnit
|
||||||
import java.util.concurrent.TimeUnit
|
import java.util.concurrent.TimeUnit
|
||||||
|
|
||||||
|
|
||||||
@Sharable
|
class ThrottlingHandler(private val bucketManager : BucketManager,
|
||||||
class ThrottlingHandler(cfg: Configuration) :
|
private val connectionConfiguration : Configuration.Connection) : ChannelInboundHandlerAdapter() {
|
||||||
ChannelInboundHandlerAdapter() {
|
|
||||||
|
|
||||||
private val log = contextLogger()
|
private companion object {
|
||||||
private val bucketManager = BucketManager.from(cfg)
|
private val log = createLogger<ThrottlingHandler>()
|
||||||
|
}
|
||||||
|
|
||||||
private val connectionConfiguration = cfg.connection
|
private var queuedContent : MutableList<HttpContent>? = null
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the suggested waiting time from the bucket is lower than this
|
* If the suggested waiting time from the bucket is lower than this
|
||||||
@@ -38,29 +39,38 @@ class ThrottlingHandler(cfg: Configuration) :
|
|||||||
connectionConfiguration.writeIdleTimeout
|
connectionConfiguration.writeIdleTimeout
|
||||||
).dividedBy(2)
|
).dividedBy(2)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
|
override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
|
||||||
val buckets = mutableListOf<Bucket>()
|
if(msg is HttpRequest) {
|
||||||
val user = ctx.channel().attr(RemoteBuildCacheServer.userAttribute).get()
|
val buckets = mutableListOf<Bucket>()
|
||||||
if (user != null) {
|
val user = ctx.channel().attr(RemoteBuildCacheServer.userAttribute).get()
|
||||||
bucketManager.getBucketByUser(user)?.let(buckets::addAll)
|
if (user != null) {
|
||||||
}
|
bucketManager.getBucketByUser(user)?.let(buckets::addAll)
|
||||||
val groups = ctx.channel().attr(RemoteBuildCacheServer.groupAttribute).get() ?: emptySet()
|
|
||||||
if (groups.isNotEmpty()) {
|
|
||||||
groups.forEach { group ->
|
|
||||||
bucketManager.getBucketByGroup(group)?.let(buckets::add)
|
|
||||||
}
|
}
|
||||||
}
|
val groups = ctx.channel().attr(RemoteBuildCacheServer.groupAttribute).get() ?: emptySet()
|
||||||
if (user == null && groups.isEmpty()) {
|
if (groups.isNotEmpty()) {
|
||||||
bucketManager.getBucketByAddress(ctx.channel().remoteAddress() as InetSocketAddress)?.let(buckets::add)
|
groups.forEach { group ->
|
||||||
}
|
bucketManager.getBucketByGroup(group)?.let(buckets::add)
|
||||||
if (buckets.isEmpty()) {
|
}
|
||||||
return super.channelRead(ctx, msg)
|
}
|
||||||
|
if (user == null && groups.isEmpty()) {
|
||||||
|
bucketManager.getBucketByAddress(ctx.channel().remoteAddress() as InetSocketAddress)?.let(buckets::add)
|
||||||
|
}
|
||||||
|
if (buckets.isEmpty()) {
|
||||||
|
super.channelRead(ctx, msg)
|
||||||
|
} else {
|
||||||
|
handleBuckets(buckets, ctx, msg, true)
|
||||||
|
}
|
||||||
|
ctx.channel().id()
|
||||||
|
} else if(msg is HttpContent) {
|
||||||
|
queuedContent?.add(msg) ?: super.channelRead(ctx, msg)
|
||||||
} else {
|
} else {
|
||||||
handleBuckets(buckets, ctx, msg, true)
|
super.channelRead(ctx, msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun handleBuckets(buckets : List<Bucket>, ctx : ChannelHandlerContext, msg : Any, delayResponse : Boolean) {
|
private fun handleBuckets(buckets: List<Bucket>, ctx: ChannelHandlerContext, msg: Any, delayResponse: Boolean) {
|
||||||
var nextAttempt = -1L
|
var nextAttempt = -1L
|
||||||
for (bucket in buckets) {
|
for (bucket in buckets) {
|
||||||
val bucketNextAttempt = bucket.removeTokensWithEstimate(1)
|
val bucketNextAttempt = bucket.removeTokensWithEstimate(1)
|
||||||
@@ -68,17 +78,25 @@ class ThrottlingHandler(cfg: Configuration) :
|
|||||||
nextAttempt = bucketNextAttempt
|
nextAttempt = bucketNextAttempt
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if(nextAttempt < 0) {
|
if (nextAttempt < 0) {
|
||||||
super.channelRead(ctx, msg)
|
super.channelRead(ctx, msg)
|
||||||
return
|
queuedContent?.let {
|
||||||
}
|
for(content in it) {
|
||||||
val waitDuration = Duration.of(LongMath.ceilDiv(nextAttempt, 100_000_000L) * 100L, ChronoUnit.MILLIS)
|
super.channelRead(ctx, content)
|
||||||
if (delayResponse && waitDuration < waitThreshold) {
|
}
|
||||||
ctx.executor().schedule({
|
queuedContent = null
|
||||||
handleBuckets(buckets, ctx, msg, false)
|
}
|
||||||
}, waitDuration.toMillis(), TimeUnit.MILLISECONDS)
|
|
||||||
} else {
|
} else {
|
||||||
sendThrottledResponse(ctx, waitDuration)
|
val waitDuration = Duration.of(LongMath.ceilDiv(nextAttempt, 100_000_000L) * 100L, ChronoUnit.MILLIS)
|
||||||
|
if (delayResponse && waitDuration < waitThreshold) {
|
||||||
|
this.queuedContent = mutableListOf()
|
||||||
|
ctx.executor().schedule({
|
||||||
|
handleBuckets(buckets, ctx, msg, false)
|
||||||
|
}, waitDuration.toMillis(), TimeUnit.MILLISECONDS)
|
||||||
|
} else {
|
||||||
|
this.queuedContent = null
|
||||||
|
sendThrottledResponse(ctx, waitDuration)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -39,7 +39,7 @@
|
|||||||
<xs:attribute name="idle-timeout" type="xs:duration" use="optional" default="PT30S"/>
|
<xs:attribute name="idle-timeout" type="xs:duration" use="optional" default="PT30S"/>
|
||||||
<xs:attribute name="read-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
|
<xs:attribute name="read-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
|
||||||
<xs:attribute name="write-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
|
<xs:attribute name="write-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
|
||||||
<xs:attribute name="max-request-size" type="xs:unsignedInt" use="optional" default="67108864"/>
|
<xs:attribute name="max-request-size" type="rbcs:byteSizeType" use="optional" default="0x4000000"/>
|
||||||
</xs:complexType>
|
</xs:complexType>
|
||||||
|
|
||||||
<xs:complexType name="eventExecutorType">
|
<xs:complexType name="eventExecutorType">
|
||||||
@@ -52,10 +52,11 @@
|
|||||||
<xs:complexContent>
|
<xs:complexContent>
|
||||||
<xs:extension base="rbcs:cacheType">
|
<xs:extension base="rbcs:cacheType">
|
||||||
<xs:attribute name="max-age" type="xs:duration" default="P1D"/>
|
<xs:attribute name="max-age" type="xs:duration" default="P1D"/>
|
||||||
<xs:attribute name="max-size" type="xs:token" default="0x1000000"/>
|
<xs:attribute name="max-size" type="rbcs:byteSizeType" default="0x1000000"/>
|
||||||
<xs:attribute name="digest" type="xs:token" default="MD5"/>
|
<xs:attribute name="digest" type="xs:token" default="MD5"/>
|
||||||
<xs:attribute name="enable-compression" type="xs:boolean" default="true"/>
|
<xs:attribute name="enable-compression" type="xs:boolean" default="true"/>
|
||||||
<xs:attribute name="compression-level" type="xs:byte" default="-1"/>
|
<xs:attribute name="compression-level" type="rbcs:compressionLevelType" default="-1"/>
|
||||||
|
<xs:attribute name="chunk-size" type="rbcs:byteSizeType" default="0x10000"/>
|
||||||
</xs:extension>
|
</xs:extension>
|
||||||
</xs:complexContent>
|
</xs:complexContent>
|
||||||
</xs:complexType>
|
</xs:complexType>
|
||||||
@@ -63,11 +64,12 @@
|
|||||||
<xs:complexType name="fileSystemCacheType">
|
<xs:complexType name="fileSystemCacheType">
|
||||||
<xs:complexContent>
|
<xs:complexContent>
|
||||||
<xs:extension base="rbcs:cacheType">
|
<xs:extension base="rbcs:cacheType">
|
||||||
<xs:attribute name="path" type="xs:string" use="required"/>
|
<xs:attribute name="path" type="xs:string" use="optional"/>
|
||||||
<xs:attribute name="max-age" type="xs:duration" default="P1D"/>
|
<xs:attribute name="max-age" type="xs:duration" default="P1D"/>
|
||||||
<xs:attribute name="digest" type="xs:token" default="MD5"/>
|
<xs:attribute name="digest" type="xs:token" default="MD5"/>
|
||||||
<xs:attribute name="enable-compression" type="xs:boolean" default="true"/>
|
<xs:attribute name="enable-compression" type="xs:boolean" default="true"/>
|
||||||
<xs:attribute name="compression-level" type="xs:byte" default="-1"/>
|
<xs:attribute name="compression-level" type="rbcs:compressionLevelType" default="-1"/>
|
||||||
|
<xs:attribute name="chunk-size" type="rbcs:byteSizeType" default="0x10000"/>
|
||||||
</xs:extension>
|
</xs:extension>
|
||||||
</xs:complexContent>
|
</xs:complexContent>
|
||||||
</xs:complexType>
|
</xs:complexType>
|
||||||
@@ -220,5 +222,17 @@
|
|||||||
<xs:attribute name="port" type="xs:unsignedShort" use="required"/>
|
<xs:attribute name="port" type="xs:unsignedShort" use="required"/>
|
||||||
</xs:complexType>
|
</xs:complexType>
|
||||||
|
|
||||||
|
<xs:simpleType name="byteSizeType">
|
||||||
|
<xs:restriction base="xs:token">
|
||||||
|
<xs:pattern value="(0x[a-f0-9]+|[0-9]+)"/>
|
||||||
|
</xs:restriction>
|
||||||
|
</xs:simpleType>
|
||||||
|
|
||||||
|
<xs:simpleType name="compressionLevelType">
|
||||||
|
<xs:restriction base="xs:integer">
|
||||||
|
<xs:minInclusive value="-1"/>
|
||||||
|
<xs:maxInclusive value="9"/>
|
||||||
|
</xs:restriction>
|
||||||
|
</xs:simpleType>
|
||||||
|
|
||||||
</xs:schema>
|
</xs:schema>
|
||||||
|
@@ -47,11 +47,13 @@ abstract class AbstractBasicAuthServerTest : AbstractServerTest() {
|
|||||||
),
|
),
|
||||||
users.asSequence().map { it.name to it}.toMap(),
|
users.asSequence().map { it.name to it}.toMap(),
|
||||||
sequenceOf(writersGroup, readersGroup).map { it.name to it}.toMap(),
|
sequenceOf(writersGroup, readersGroup).map { it.name to it}.toMap(),
|
||||||
FileSystemCacheConfiguration(this.cacheDir,
|
FileSystemCacheConfiguration(
|
||||||
|
this.cacheDir,
|
||||||
maxAge = Duration.ofSeconds(3600 * 24),
|
maxAge = Duration.ofSeconds(3600 * 24),
|
||||||
digestAlgorithm = "MD5",
|
digestAlgorithm = "MD5",
|
||||||
compressionLevel = Deflater.DEFAULT_COMPRESSION,
|
compressionLevel = Deflater.DEFAULT_COMPRESSION,
|
||||||
compressionEnabled = false
|
compressionEnabled = false,
|
||||||
|
chunkSize = 0x1000
|
||||||
),
|
),
|
||||||
Configuration.BasicAuthentication(),
|
Configuration.BasicAuthentication(),
|
||||||
null,
|
null,
|
||||||
|
@@ -43,8 +43,9 @@ abstract class AbstractServerTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private fun stopServer() {
|
private fun stopServer() {
|
||||||
this.serverHandle?.use {
|
this.serverHandle?.let {
|
||||||
it.shutdown()
|
it.sendShutdownSignal()
|
||||||
|
it.get()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
@@ -154,9 +154,10 @@ abstract class AbstractTlsServerTest : AbstractServerTest() {
|
|||||||
sequenceOf(writersGroup, readersGroup).map { it.name to it }.toMap(),
|
sequenceOf(writersGroup, readersGroup).map { it.name to it }.toMap(),
|
||||||
FileSystemCacheConfiguration(this.cacheDir,
|
FileSystemCacheConfiguration(this.cacheDir,
|
||||||
maxAge = Duration.ofSeconds(3600 * 24),
|
maxAge = Duration.ofSeconds(3600 * 24),
|
||||||
compressionEnabled = true,
|
compressionEnabled = false,
|
||||||
compressionLevel = Deflater.DEFAULT_COMPRESSION,
|
compressionLevel = Deflater.DEFAULT_COMPRESSION,
|
||||||
digestAlgorithm = "MD5"
|
digestAlgorithm = "MD5",
|
||||||
|
chunkSize = 0x1000
|
||||||
),
|
),
|
||||||
// InMemoryCacheConfiguration(
|
// InMemoryCacheConfiguration(
|
||||||
// maxAge = Duration.ofSeconds(3600 * 24),
|
// maxAge = Duration.ofSeconds(3600 * 24),
|
||||||
|
@@ -86,7 +86,7 @@ class BasicAuthServerTest : AbstractBasicAuthServerTest() {
|
|||||||
@Test
|
@Test
|
||||||
@Order(4)
|
@Order(4)
|
||||||
fun putAsAWriterUser() {
|
fun putAsAWriterUser() {
|
||||||
val client: HttpClient = HttpClient.newHttpClient()
|
val client: HttpClient = HttpClient.newBuilder().version(HttpClient.Version.HTTP_1_1).build()
|
||||||
|
|
||||||
val (key, value) = keyValuePair
|
val (key, value) = keyValuePair
|
||||||
val user = cfg.users.values.find {
|
val user = cfg.users.values.find {
|
||||||
|
@@ -52,7 +52,8 @@ class NoAuthServerTest : AbstractServerTest() {
|
|||||||
compressionEnabled = true,
|
compressionEnabled = true,
|
||||||
digestAlgorithm = "MD5",
|
digestAlgorithm = "MD5",
|
||||||
compressionLevel = Deflater.DEFAULT_COMPRESSION,
|
compressionLevel = Deflater.DEFAULT_COMPRESSION,
|
||||||
maxSize = 0x1000000
|
maxSize = 0x1000000,
|
||||||
|
chunkSize = 0x1000
|
||||||
),
|
),
|
||||||
null,
|
null,
|
||||||
null,
|
null,
|
||||||
@@ -80,7 +81,7 @@ class NoAuthServerTest : AbstractServerTest() {
|
|||||||
@Test
|
@Test
|
||||||
@Order(1)
|
@Order(1)
|
||||||
fun putWithNoAuthorizationHeader() {
|
fun putWithNoAuthorizationHeader() {
|
||||||
val client: HttpClient = HttpClient.newHttpClient()
|
val client: HttpClient = HttpClient.newBuilder().version(HttpClient.Version.HTTP_1_1).build()
|
||||||
val (key, value) = keyValuePair
|
val (key, value) = keyValuePair
|
||||||
|
|
||||||
val requestBuilder = newRequestBuilder(key)
|
val requestBuilder = newRequestBuilder(key)
|
||||||
|
@@ -11,7 +11,7 @@
|
|||||||
idle-timeout="PT30M"
|
idle-timeout="PT30M"
|
||||||
max-request-size="101325"/>
|
max-request-size="101325"/>
|
||||||
<event-executor use-virtual-threads="false"/>
|
<event-executor use-virtual-threads="false"/>
|
||||||
<cache xs:type="rbcs:fileSystemCacheType" path="/tmp/rbcs" max-age="P7D"/>
|
<cache xs:type="rbcs:fileSystemCacheType" path="/tmp/rbcs" max-age="P7D" chunk-size="0xa910"/>
|
||||||
<authentication>
|
<authentication>
|
||||||
<none/>
|
<none/>
|
||||||
</authentication>
|
</authentication>
|
||||||
|
@@ -13,7 +13,7 @@
|
|||||||
read-timeout="PT5M"
|
read-timeout="PT5M"
|
||||||
write-timeout="PT5M"/>
|
write-timeout="PT5M"/>
|
||||||
<event-executor use-virtual-threads="true"/>
|
<event-executor use-virtual-threads="true"/>
|
||||||
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" max-size="16777216" compression-mode="deflate">
|
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" chunk-size="123">
|
||||||
<server host="memcached" port="11211"/>
|
<server host="memcached" port="11211"/>
|
||||||
</cache>
|
</cache>
|
||||||
<authorization>
|
<authorization>
|
||||||
|
@@ -12,7 +12,7 @@
|
|||||||
idle-timeout="PT30M"
|
idle-timeout="PT30M"
|
||||||
max-request-size="101325"/>
|
max-request-size="101325"/>
|
||||||
<event-executor use-virtual-threads="false"/>
|
<event-executor use-virtual-threads="false"/>
|
||||||
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" max-size="101325" digest="SHA-256">
|
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" digest="SHA-256" chunk-size="456" compression-mode="deflate" compression-level="7">
|
||||||
<server host="127.0.0.1" port="11211" max-connections="10" connection-timeout="PT20S"/>
|
<server host="127.0.0.1" port="11211" max-connections="10" connection-timeout="PT20S"/>
|
||||||
</cache>
|
</cache>
|
||||||
<authentication>
|
<authentication>
|
||||||
|
@@ -11,7 +11,7 @@
|
|||||||
idle-timeout="PT30M"
|
idle-timeout="PT30M"
|
||||||
max-request-size="4096"/>
|
max-request-size="4096"/>
|
||||||
<event-executor use-virtual-threads="false"/>
|
<event-executor use-virtual-threads="false"/>
|
||||||
<cache xs:type="rbcs:inMemoryCacheType" max-age="P7D"/>
|
<cache xs:type="rbcs:inMemoryCacheType" max-age="P7D" chunk-size="0xa91f"/>
|
||||||
<authorization>
|
<authorization>
|
||||||
<users>
|
<users>
|
||||||
<user name="user1" password="password1">
|
<user name="user1" password="password1">
|
||||||
|
3
rbcs-servlet/Dockerfile
Normal file
3
rbcs-servlet/Dockerfile
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
FROM tomcat:jdk21
|
||||||
|
|
||||||
|
COPY ./rbcs-servlet-*.war /usr/local/tomcat/webapps/rbcs-servlet.war
|
28
rbcs-servlet/README.md
Normal file
28
rbcs-servlet/README.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
## How to run
|
||||||
|
|
||||||
|
```bash
|
||||||
|
gradlew dockerBuildImage
|
||||||
|
```
|
||||||
|
then in this directory run
|
||||||
|
```bash
|
||||||
|
docker run --rm -p 127.0.0.1:8080:8080 -m 1G --name tomcat -v $(pwd)/conf/server.xml:/usr/local/tomcat/conf/server.xml gitea.woggioni.net/woggioni/rbcs/servlet:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
you can call the servlet cache with this RBCS client profile
|
||||||
|
```xml
|
||||||
|
<profile name="servlet" base-url="http://127.0.0.1:8080/rbcs-servlet/cache/" max-connections="100" enable-compression="false">
|
||||||
|
<no-auth/>
|
||||||
|
<connection
|
||||||
|
idle-timeout="PT5S"
|
||||||
|
read-idle-timeout="PT10S"
|
||||||
|
write-idle-timeout="PT10S"
|
||||||
|
read-timeout="PT5S"
|
||||||
|
write-timeout="PT5S"/>
|
||||||
|
<retry-policy max-attempts="10" initial-delay="PT1S" exp="1.2"/>
|
||||||
|
</profile>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
The servlet implementation has an in memory cache whose maximum
|
||||||
|
size is hardcoded to 0x8000000 bytes (around 134 MB)
|
33
rbcs-servlet/build.gradle
Normal file
33
rbcs-servlet/build.gradle
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
plugins {
|
||||||
|
alias(catalog.plugins.kotlin.jvm)
|
||||||
|
alias(catalog.plugins.gradle.docker)
|
||||||
|
id 'war'
|
||||||
|
}
|
||||||
|
|
||||||
|
import com.bmuschko.gradle.docker.tasks.image.DockerBuildImage
|
||||||
|
|
||||||
|
dependencies {
|
||||||
|
compileOnly catalog.jakarta.servlet.api
|
||||||
|
compileOnly catalog.jakarta.enterprise.cdi.api
|
||||||
|
|
||||||
|
implementation catalog.jwo
|
||||||
|
implementation catalog.jakarta.el
|
||||||
|
implementation catalog.jakarta.cdi.el.api
|
||||||
|
implementation catalog.weld.servlet.core
|
||||||
|
implementation catalog.weld.web
|
||||||
|
}
|
||||||
|
|
||||||
|
Provider<Copy> prepareDockerBuild = tasks.register('prepareDockerBuild', Copy) {
|
||||||
|
group = 'docker'
|
||||||
|
into project.layout.buildDirectory.file('docker')
|
||||||
|
from(tasks.war)
|
||||||
|
from(file('Dockerfile'))
|
||||||
|
}
|
||||||
|
|
||||||
|
Provider<DockerBuildImage> dockerBuild = tasks.register('dockerBuildImage', DockerBuildImage) {
|
||||||
|
group = 'docker'
|
||||||
|
dependsOn(prepareDockerBuild)
|
||||||
|
images.add('gitea.woggioni.net/woggioni/rbcs/servlet:latest')
|
||||||
|
images.add("gitea.woggioni.net/woggioni/rbcs/servlet:${version}")
|
||||||
|
}
|
||||||
|
|
140
rbcs-servlet/conf/server.xml
Normal file
140
rbcs-servlet/conf/server.xml
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!-- Note: A "Server" is not itself a "Container", so you may not
|
||||||
|
define subcomponents such as "Valves" at this level.
|
||||||
|
Documentation at /docs/config/server.html
|
||||||
|
-->
|
||||||
|
<Server port="8005" shutdown="SHUTDOWN">
|
||||||
|
<Listener className="org.apache.catalina.startup.VersionLoggerListener" />
|
||||||
|
<!-- Security listener. Documentation at /docs/config/listeners.html
|
||||||
|
<Listener className="org.apache.catalina.security.SecurityListener" />
|
||||||
|
-->
|
||||||
|
<!-- OpenSSL support using Tomcat Native -->
|
||||||
|
<Listener className="org.apache.catalina.core.AprLifecycleListener" />
|
||||||
|
<!-- OpenSSL support using FFM API from Java 22 -->
|
||||||
|
<!-- <Listener className="org.apache.catalina.core.OpenSSLLifecycleListener" /> -->
|
||||||
|
<!-- Prevent memory leaks due to use of particular java/javax APIs-->
|
||||||
|
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
|
||||||
|
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
|
||||||
|
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
|
||||||
|
|
||||||
|
<!-- Global JNDI resources
|
||||||
|
Documentation at /docs/jndi-resources-howto.html
|
||||||
|
-->
|
||||||
|
<GlobalNamingResources>
|
||||||
|
<!-- Editable user database that can also be used by
|
||||||
|
UserDatabaseRealm to authenticate users
|
||||||
|
-->
|
||||||
|
<Resource name="UserDatabase" auth="Container"
|
||||||
|
type="org.apache.catalina.UserDatabase"
|
||||||
|
description="User database that can be updated and saved"
|
||||||
|
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
|
||||||
|
pathname="conf/tomcat-users.xml" />
|
||||||
|
</GlobalNamingResources>
|
||||||
|
|
||||||
|
<!-- A "Service" is a collection of one or more "Connectors" that share
|
||||||
|
a single "Container" Note: A "Service" is not itself a "Container",
|
||||||
|
so you may not define subcomponents such as "Valves" at this level.
|
||||||
|
Documentation at /docs/config/service.html
|
||||||
|
-->
|
||||||
|
<Service name="Catalina">
|
||||||
|
|
||||||
|
<!--The connectors can use a shared executor, you can define one or more named thread pools-->
|
||||||
|
<!-- <Executor name="tomcatThreadPool" namePrefix="catalina-exec-" maxThreads="150" minSpareThreads="4"/>-->
|
||||||
|
<Executor name="tomcatThreadPool" namePrefix="virtual-exec-" className="org.apache.catalina.core.StandardVirtualThreadExecutor"/>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<!-- A "Connector" represents an endpoint by which requests are received
|
||||||
|
and responses are returned. Documentation at :
|
||||||
|
HTTP Connector: /docs/config/http.html
|
||||||
|
AJP Connector: /docs/config/ajp.html
|
||||||
|
Define a non-SSL/TLS HTTP/1.1 Connector on port 8080
|
||||||
|
-->
|
||||||
|
<!-- <Connector port="8080" protocol="HTTP/1.1" executor="tomcatThreadPool"-->
|
||||||
|
<!-- connectionTimeout="20000"-->
|
||||||
|
<!-- redirectPort="8443" />-->
|
||||||
|
<Connector port="8080" protocol="HTTP/1.1"
|
||||||
|
connectionTimeout="20000"
|
||||||
|
redirectPort="8443" />
|
||||||
|
<!-- A "Connector" using the shared thread pool-->
|
||||||
|
<!--
|
||||||
|
<Connector executor="tomcatThreadPool"
|
||||||
|
port="8080" protocol="HTTP/1.1"
|
||||||
|
connectionTimeout="20000"
|
||||||
|
redirectPort="8443" />
|
||||||
|
-->
|
||||||
|
<!-- Define an SSL/TLS HTTP/1.1 Connector on port 8443 with HTTP/2
|
||||||
|
This connector uses the NIO implementation. The default
|
||||||
|
SSLImplementation will depend on the presence of the APR/native
|
||||||
|
library and the useOpenSSL attribute of the AprLifecycleListener.
|
||||||
|
Either JSSE or OpenSSL style configuration may be used regardless of
|
||||||
|
the SSLImplementation selected. JSSE style configuration is used below.
|
||||||
|
-->
|
||||||
|
<!--
|
||||||
|
<Connector port="8443" protocol="org.apache.coyote.http11.Http11NioProtocol"
|
||||||
|
maxThreads="150" SSLEnabled="true">
|
||||||
|
<UpgradeProtocol className="org.apache.coyote.http2.Http2Protocol" />
|
||||||
|
<SSLHostConfig>
|
||||||
|
<Certificate certificateKeystoreFile="conf/localhost-rsa.jks"
|
||||||
|
certificateKeystorePassword="changeit" type="RSA" />
|
||||||
|
</SSLHostConfig>
|
||||||
|
</Connector>
|
||||||
|
-->
|
||||||
|
|
||||||
|
<!-- Define an AJP 1.3 Connector on port 8009 -->
|
||||||
|
<!--
|
||||||
|
<Connector protocol="AJP/1.3"
|
||||||
|
address="::1"
|
||||||
|
port="8009"
|
||||||
|
redirectPort="8443" />
|
||||||
|
-->
|
||||||
|
|
||||||
|
<!-- An Engine represents the entry point (within Catalina) that processes
|
||||||
|
every request. The Engine implementation for Tomcat stand alone
|
||||||
|
analyzes the HTTP headers included with the request, and passes them
|
||||||
|
on to the appropriate Host (virtual host).
|
||||||
|
Documentation at /docs/config/engine.html -->
|
||||||
|
|
||||||
|
<!-- You should set jvmRoute to support load-balancing via AJP ie :
|
||||||
|
<Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
|
||||||
|
-->
|
||||||
|
<Engine name="Catalina" defaultHost="localhost">
|
||||||
|
|
||||||
|
<!--For clustering, please take a look at documentation at:
|
||||||
|
/docs/cluster-howto.html (simple how to)
|
||||||
|
/docs/config/cluster.html (reference documentation) -->
|
||||||
|
<!--
|
||||||
|
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
|
||||||
|
-->
|
||||||
|
|
||||||
|
<!-- Use the LockOutRealm to prevent attempts to guess user passwords
|
||||||
|
via a brute-force attack -->
|
||||||
|
<Realm className="org.apache.catalina.realm.LockOutRealm">
|
||||||
|
<!-- This Realm uses the UserDatabase configured in the global JNDI
|
||||||
|
resources under the key "UserDatabase". Any edits
|
||||||
|
that are performed against this UserDatabase are immediately
|
||||||
|
available for use by the Realm. -->
|
||||||
|
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
|
||||||
|
resourceName="UserDatabase"/>
|
||||||
|
</Realm>
|
||||||
|
|
||||||
|
<Host name="localhost" appBase="webapps"
|
||||||
|
unpackWARs="true" autoDeploy="true">
|
||||||
|
|
||||||
|
<!-- SingleSignOn valve, share authentication between web applications
|
||||||
|
Documentation at: /docs/config/valve.html -->
|
||||||
|
<!--
|
||||||
|
<Valve className="org.apache.catalina.authenticator.SingleSignOn" />
|
||||||
|
-->
|
||||||
|
|
||||||
|
<!-- Access log processes all example.
|
||||||
|
Documentation at: /docs/config/valve.html
|
||||||
|
Note: The pattern used is equivalent to using pattern="common" -->
|
||||||
|
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
       prefix="localhost_access_log" suffix=".txt"
       pattern="%h %l %u %t &quot;%r&quot; %s %b" />
|
||||||
|
|
||||||
|
</Host>
|
||||||
|
</Engine>
|
||||||
|
</Service>
|
||||||
|
</Server>
|
58
rbcs-servlet/conf/tomcat-users.xml
Normal file
58
rbcs-servlet/conf/tomcat-users.xml
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!--
|
||||||
|
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
contributor license agreements. See the NOTICE file distributed with
|
||||||
|
this work for additional information regarding copyright ownership.
|
||||||
|
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
(the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
-->
|
||||||
|
<tomcat-users xmlns="http://tomcat.apache.org/xml"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://tomcat.apache.org/xml tomcat-users.xsd"
|
||||||
|
version="1.0">
|
||||||
|
<!--
|
||||||
|
By default, no user is included in the "manager-gui" role required
|
||||||
|
to operate the "/manager/html" web application. If you wish to use this app,
|
||||||
|
you must define such a user - the username and password are arbitrary.
|
||||||
|
|
||||||
|
Built-in Tomcat manager roles:
|
||||||
|
- manager-gui - allows access to the HTML GUI and the status pages
|
||||||
|
- manager-script - allows access to the HTTP API and the status pages
|
||||||
|
- manager-jmx - allows access to the JMX proxy and the status pages
|
||||||
|
- manager-status - allows access to the status pages only
|
||||||
|
|
||||||
|
The users below are wrapped in a comment and are therefore ignored. If you
|
||||||
|
wish to configure one or more of these users for use with the manager web
|
||||||
|
application, do not forget to remove the <!.. ..> that surrounds them. You
|
||||||
|
will also need to set the passwords to something appropriate.
|
||||||
|
-->
|
||||||
|
<!--
|
||||||
|
<user username="admin" password="<must-be-changed>" roles="manager-gui"/>
|
||||||
|
<user username="robot" password="<must-be-changed>" roles="manager-script"/>
|
||||||
|
-->
|
||||||
|
<user username="luser" password="password" roles="manager-gui,admin-gui"/>
|
||||||
|
|
||||||
|
<!--
|
||||||
|
The sample user and role entries below are intended for use with the
|
||||||
|
examples web application. They are wrapped in a comment and thus are ignored
|
||||||
|
when reading this file. If you wish to configure these users for use with the
|
||||||
|
examples web application, do not forget to remove the <!.. ..> that surrounds
|
||||||
|
them. You will also need to set the passwords to something appropriate.
|
||||||
|
-->
|
||||||
|
<!--
|
||||||
|
<role rolename="tomcat"/>
|
||||||
|
<role rolename="role1"/>
|
||||||
|
<user username="tomcat" password="<must-be-changed>" roles="tomcat"/>
|
||||||
|
<user username="both" password="<must-be-changed>" roles="tomcat,role1"/>
|
||||||
|
<user username="role1" password="<must-be-changed>" roles="role1"/>
|
||||||
|
-->
|
||||||
|
</tomcat-users>
|
@@ -0,0 +1,169 @@
|
|||||||
|
package net.woggioni.rbcs.servlet
|
||||||
|
|
||||||
|
import jakarta.annotation.PreDestroy
|
||||||
|
import jakarta.enterprise.context.ApplicationScoped
|
||||||
|
import jakarta.inject.Inject
|
||||||
|
import jakarta.servlet.annotation.WebServlet
|
||||||
|
import jakarta.servlet.http.HttpServlet
|
||||||
|
import jakarta.servlet.http.HttpServletRequest
|
||||||
|
import jakarta.servlet.http.HttpServletResponse
|
||||||
|
import net.woggioni.jwo.HttpClient.HttpStatus
|
||||||
|
import net.woggioni.jwo.JWO
|
||||||
|
import java.io.ByteArrayInputStream
|
||||||
|
import java.io.ByteArrayOutputStream
|
||||||
|
import java.nio.file.Path
|
||||||
|
import java.time.Duration
|
||||||
|
import java.time.Instant
|
||||||
|
import java.util.concurrent.ConcurrentHashMap
|
||||||
|
import java.util.concurrent.PriorityBlockingQueue
|
||||||
|
import java.util.concurrent.TimeUnit
|
||||||
|
import java.util.concurrent.atomic.AtomicLong
|
||||||
|
import java.util.logging.Logger
|
||||||
|
|
||||||
|
|
||||||
|
private class CacheKey(private val value: ByteArray) {
|
||||||
|
override fun equals(other: Any?) = if (other is CacheKey) {
|
||||||
|
value.contentEquals(other.value)
|
||||||
|
} else false
|
||||||
|
|
||||||
|
override fun hashCode() = value.contentHashCode()
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ApplicationScoped
|
||||||
|
open class InMemoryServletCache : AutoCloseable {
|
||||||
|
|
||||||
|
private val maxAge= Duration.ofDays(7)
|
||||||
|
private val maxSize = 0x8000000
|
||||||
|
|
||||||
|
companion object {
|
||||||
|
@JvmStatic
|
||||||
|
private val log = Logger.getLogger(this::class.java.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
private val size = AtomicLong()
|
||||||
|
private val map = ConcurrentHashMap<CacheKey, ByteArray>()
|
||||||
|
|
||||||
|
private class RemovalQueueElement(val key: CacheKey, val value: ByteArray, val expiry: Instant) :
|
||||||
|
Comparable<RemovalQueueElement> {
|
||||||
|
override fun compareTo(other: RemovalQueueElement) = expiry.compareTo(other.expiry)
|
||||||
|
}
|
||||||
|
|
||||||
|
private val removalQueue = PriorityBlockingQueue<RemovalQueueElement>()
|
||||||
|
|
||||||
|
@Volatile
|
||||||
|
private var running = false
|
||||||
|
|
||||||
|
private val garbageCollector = Thread.ofVirtual().name("in-memory-cache-gc").start {
|
||||||
|
while (running) {
|
||||||
|
val el = removalQueue.poll(1, TimeUnit.SECONDS) ?: continue
|
||||||
|
val value = el.value
|
||||||
|
val now = Instant.now()
|
||||||
|
if (now > el.expiry) {
|
||||||
|
val removed = map.remove(el.key, value)
|
||||||
|
if (removed) {
|
||||||
|
updateSizeAfterRemoval(value)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
removalQueue.put(el)
|
||||||
|
Thread.sleep(minOf(Duration.between(now, el.expiry), Duration.ofSeconds(1)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun removeEldest(): Long {
|
||||||
|
while (true) {
|
||||||
|
val el = removalQueue.take()
|
||||||
|
val value = el.value
|
||||||
|
val removed = map.remove(el.key, value)
|
||||||
|
if (removed) {
|
||||||
|
val newSize = updateSizeAfterRemoval(value)
|
||||||
|
return newSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun updateSizeAfterRemoval(removed: ByteArray): Long {
|
||||||
|
return size.updateAndGet { currentSize: Long ->
|
||||||
|
currentSize - removed.size
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@PreDestroy
|
||||||
|
override fun close() {
|
||||||
|
running = false
|
||||||
|
garbageCollector.join()
|
||||||
|
}
|
||||||
|
|
||||||
|
open fun get(key: ByteArray) = map[CacheKey(key)]
|
||||||
|
|
||||||
|
open fun put(
|
||||||
|
key: ByteArray,
|
||||||
|
value: ByteArray,
|
||||||
|
) {
|
||||||
|
val cacheKey = CacheKey(key)
|
||||||
|
val oldSize = map.put(cacheKey, value)?.let { old ->
|
||||||
|
val result = old.size
|
||||||
|
result
|
||||||
|
} ?: 0
|
||||||
|
val delta = value.size - oldSize
|
||||||
|
var newSize = size.updateAndGet { currentSize: Long ->
|
||||||
|
currentSize + delta
|
||||||
|
}
|
||||||
|
removalQueue.put(RemovalQueueElement(cacheKey, value, Instant.now().plus(maxAge)))
|
||||||
|
while (newSize > maxSize) {
|
||||||
|
newSize = removeEldest()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@WebServlet(urlPatterns = ["/cache/*"])
|
||||||
|
class CacheServlet : HttpServlet() {
|
||||||
|
companion object {
|
||||||
|
@JvmStatic
|
||||||
|
private val log = Logger.getLogger(this::class.java.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
@Inject
|
||||||
|
private lateinit var cache : InMemoryServletCache
|
||||||
|
|
||||||
|
private fun getKey(req : HttpServletRequest) : String {
|
||||||
|
return Path.of(req.pathInfo).fileName.toString()
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun doPut(req: HttpServletRequest, resp: HttpServletResponse) {
|
||||||
|
val baos = ByteArrayOutputStream()
|
||||||
|
baos.use {
|
||||||
|
JWO.copy(req.inputStream, baos)
|
||||||
|
}
|
||||||
|
val key = getKey(req)
|
||||||
|
cache.put(key.toByteArray(Charsets.UTF_8), baos.toByteArray())
|
||||||
|
resp.status = 201
|
||||||
|
resp.setContentLength(0)
|
||||||
|
log.fine {
|
||||||
|
"[${Thread.currentThread().name}] Added value for key $key"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun doGet(req: HttpServletRequest, resp: HttpServletResponse) {
|
||||||
|
val key = getKey(req)
|
||||||
|
val value = cache.get(key.toByteArray(Charsets.UTF_8))
|
||||||
|
if (value == null) {
|
||||||
|
log.fine {
|
||||||
|
"[${Thread.currentThread().name}] Cache miss for key $key"
|
||||||
|
}
|
||||||
|
resp.status = HttpStatus.NOT_FOUND.code
|
||||||
|
resp.setContentLength(0)
|
||||||
|
} else {
|
||||||
|
log.fine {
|
||||||
|
"[${Thread.currentThread().name}] Cache hit for key $key"
|
||||||
|
}
|
||||||
|
resp.status = HttpStatus.OK.code
|
||||||
|
resp.setContentLength(value.size)
|
||||||
|
ByteArrayInputStream(value).use {
|
||||||
|
JWO.copy(it, resp.outputStream)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
5
rbcs-servlet/src/main/resources/META-INF/beans.xml
Normal file
5
rbcs-servlet/src/main/resources/META-INF/beans.xml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
<beans xmlns="https://jakarta.ee/xml/ns/jakartaee"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="https://jakarta.ee/xml/ns/jakartaee https://jakarta.ee/xml/ns/jakartaee/beans_4_0.xsd"
|
||||||
|
version="4.0">
|
||||||
|
</beans>
|
7
rbcs-servlet/src/main/resources/META-INF/context.xml
Normal file
7
rbcs-servlet/src/main/resources/META-INF/context.xml
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<Context antiJARLocking="true">
|
||||||
|
<Resource name="BeanManager"
|
||||||
|
auth="Container"
|
||||||
|
type="javax.enterprise.inject.spi.BeanManager"
|
||||||
|
factory="org.jboss.weld.resources.ManagerObjectFactory"/>
|
||||||
|
</Context>
|
8
rbcs-servlet/src/main/resources/logging.properties
Normal file
8
rbcs-servlet/src/main/resources/logging.properties
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
handlers = java.util.logging.ConsoleHandler
|
||||||
|
.level=INFO
|
||||||
|
net.woggioni.rbcs.servlet.level=FINEST
|
||||||
|
java.util.logging.ConsoleHandler.level=INFO
|
||||||
|
java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
|
||||||
|
java.util.logging.SimpleFormatter.format = %1$tF %1$tT [%4$s] %2$s %5$s %6$s%n
|
||||||
|
org.apache.catalina.core.ContainerBase.[Catalina].level=ALL
|
||||||
|
org.apache.catalina.core.ContainerBase.[Catalina].handlers=java.util.logging.ConsoleHandler
|
8
rbcs-servlet/src/main/webapp/WEB-INF/web.xml
Normal file
8
rbcs-servlet/src/main/webapp/WEB-INF/web.xml
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
<web-app xmlns="https://jakarta.ee/xml/ns/jakartaee"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="https://jakarta.ee/xml/ns/jakartaee https://jakarta.ee/xml/ns/jakartaee/web-app_6_0.xsd"
|
||||||
|
version="6.0">
|
||||||
|
<listener>
|
||||||
|
<listener-class>org.jboss.weld.module.web.servlet.WeldTerminalListener</listener-class>
|
||||||
|
</listener>
|
||||||
|
</web-app>
|
@@ -29,6 +29,7 @@ include 'rbcs-api'
|
|||||||
include 'rbcs-common'
|
include 'rbcs-common'
|
||||||
include 'rbcs-server-memcache'
|
include 'rbcs-server-memcache'
|
||||||
include 'rbcs-cli'
|
include 'rbcs-cli'
|
||||||
include 'docker'
|
|
||||||
include 'rbcs-client'
|
include 'rbcs-client'
|
||||||
include 'rbcs-server'
|
include 'rbcs-server'
|
||||||
|
include 'rbcs-servlet'
|
||||||
|
include 'docker'
|
||||||
|
Reference in New Issue
Block a user