first commit with streaming support (buggy and unreliable)

2025-02-13 23:02:08 +08:00
parent 7eca8a270d
commit 0463038aaa
37 changed files with 917 additions and 411 deletions

View File

@@ -16,7 +16,6 @@ import io.netty.handler.codec.compression.CompressionOptions
import io.netty.handler.codec.http.DefaultHttpContent
import io.netty.handler.codec.http.HttpContentCompressor
import io.netty.handler.codec.http.HttpHeaderNames
import io.netty.handler.codec.http.HttpObjectAggregator
import io.netty.handler.codec.http.HttpRequest
import io.netty.handler.codec.http.HttpServerCodec
import io.netty.handler.ssl.ClientAuth
@@ -249,13 +248,7 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
private val cache = cfg.cache.materialize()
private val serverHandler = let {
val prefix = Path.of("/").resolve(Path.of(cfg.serverPath ?: "/"))
ServerHandler(cache, prefix)
}
private val exceptionHandler = ExceptionHandler()
private val throttlingHandler = ThrottlingHandler(cfg)
private val authenticator = when (val auth = cfg.authentication) {
is Configuration.BasicAuthentication -> NettyHttpBasicAuthenticator(cfg.users, RoleAuthorizer())
@@ -368,11 +361,15 @@ class RemoteBuildCacheServer(private val cfg: Configuration) {
pipeline.addLast(HttpServerCodec())
pipeline.addLast(HttpChunkContentCompressor(1024))
pipeline.addLast(ChunkedWriteHandler())
pipeline.addLast(HttpObjectAggregator(cfg.connection.maxRequestSize))
authenticator?.let {
pipeline.addLast(it)
}
pipeline.addLast(throttlingHandler)
pipeline.addLast(ThrottlingHandler(cfg))
val serverHandler = let {
val prefix = Path.of("/").resolve(Path.of(cfg.serverPath ?: "/"))
ServerHandler(cache, prefix)
}
pipeline.addLast(eventExecutorGroup, serverHandler)
pipeline.addLast(exceptionHandler)
}
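
Note: the hunk above removes HttpObjectAggregator from the pipeline, so downstream handlers no longer receive a single buffered FullHttpRequest; they see an HttpRequest head followed by HttpContent frames, terminated by a LastHttpContent. It also stops sharing one ServerHandler across channels and builds a fresh instance per pipeline, since the handler now carries per-request streaming state (see the ServerHandler hunk below). A minimal sketch, assuming nothing beyond stock Netty, of the message sequence such a pipeline delivers:

    import io.netty.channel.ChannelHandlerContext
    import io.netty.channel.ChannelInboundHandlerAdapter
    import io.netty.handler.codec.http.HttpContent
    import io.netty.handler.codec.http.HttpRequest
    import io.netty.handler.codec.http.LastHttpContent

    // Illustration only: logs what a handler observes once HttpObjectAggregator
    // is gone. LastHttpContent is matched before HttpContent because it is a
    // subtype of it.
    class StreamingReadLogger : ChannelInboundHandlerAdapter() {
        override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
            when (msg) {
                is HttpRequest -> println("head: ${msg.method()} ${msg.uri()}")
                is LastHttpContent -> println("last chunk: ${msg.content().readableBytes()} bytes")
                is HttpContent -> println("chunk: ${msg.content().readableBytes()} bytes")
            }
            ctx.fireChannelRead(msg) // always forward; reference counts untouched
        }
    }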

View File

@@ -6,6 +6,7 @@ import io.netty.channel.ChannelHandlerContext
import io.netty.channel.ChannelInboundHandlerAdapter
import io.netty.handler.codec.http.DefaultFullHttpResponse
import io.netty.handler.codec.http.FullHttpResponse
import io.netty.handler.codec.http.HttpContent
import io.netty.handler.codec.http.HttpHeaderNames
import io.netty.handler.codec.http.HttpRequest
import io.netty.handler.codec.http.HttpResponseStatus
@@ -57,6 +58,8 @@ abstract class AbstractNettyHttpAuthenticator(private val authorizer: Authorizer
} else {
authorizationFailure(ctx, msg)
}
} else if(msg is HttpContent) {
ctx.fireChannelRead(msg)
}
}
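
Note: with aggregation gone, the authenticator only inspects the HttpRequest head, and the new branch above forwards body frames verbatim so the body of an already-authorized request still reaches the ServerHandler. A simplified sketch of that shape (hypothetical names, stock Netty API only):

    import io.netty.channel.ChannelHandlerContext
    import io.netty.channel.ChannelInboundHandlerAdapter
    import io.netty.handler.codec.http.HttpContent
    import io.netty.handler.codec.http.HttpRequest
    import io.netty.util.ReferenceCountUtil

    class HeadOnlyAuthenticator(
        private val authenticate: (HttpRequest) -> Boolean
    ) : ChannelInboundHandlerAdapter() {
        override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
            when {
                msg is HttpRequest && authenticate(msg) -> ctx.fireChannelRead(msg)
                msg is HttpRequest -> {
                    ReferenceCountUtil.release(msg) // no-op unless the head is refcounted
                    ctx.close()                     // a real handler writes a 401 first
                }
                // Without this branch the body frames of an authorized request
                // would be dropped here and the exchange would stall.
                msg is HttpContent -> ctx.fireChannelRead(msg)
                else -> ctx.fireChannelRead(msg)
            }
        }
    }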

View File

@@ -1,12 +1,16 @@
package net.woggioni.rbcs.server.cache
import io.netty.buffer.ByteBuf
import io.netty.buffer.ByteBufAllocator
import net.woggioni.jwo.JWO
import net.woggioni.rbcs.api.Cache
import net.woggioni.rbcs.common.ByteBufInputStream
import net.woggioni.rbcs.api.RequestHandle
import net.woggioni.rbcs.api.ResponseHandle
import net.woggioni.rbcs.api.event.RequestStreamingEvent
import net.woggioni.rbcs.api.event.ResponseStreamingEvent
import net.woggioni.rbcs.common.ByteBufOutputStream
import net.woggioni.rbcs.common.RBCS.digestString
import net.woggioni.rbcs.common.contextLogger
import java.nio.channels.Channels
import net.woggioni.rbcs.common.extractChunk
import java.nio.channels.FileChannel
import java.nio.file.Files
import java.nio.file.Path
@@ -19,7 +23,6 @@ import java.time.Instant
import java.util.concurrent.CompletableFuture
import java.util.zip.Deflater
import java.util.zip.DeflaterOutputStream
import java.util.zip.Inflater
import java.util.zip.InflaterInputStream
class FileSystemCache(
@@ -27,7 +30,8 @@ class FileSystemCache(
val maxAge: Duration,
val digestAlgorithm: String?,
val compressionEnabled: Boolean,
val compressionLevel: Int
val compressionLevel: Int,
val chunkSize: Int
) : Cache {
private companion object {
@@ -44,61 +48,111 @@ class FileSystemCache(
private var nextGc = Instant.now()
override fun get(key: String) = (digestAlgorithm
?.let(MessageDigest::getInstance)
?.let { md ->
digestString(key.toByteArray(), md)
} ?: key).let { digest ->
root.resolve(digest).takeIf(Files::exists)
?.let { file ->
file.takeIf(Files::exists)?.let { file ->
if (compressionEnabled) {
val inflater = Inflater()
Channels.newChannel(
InflaterInputStream(
Channels.newInputStream(
FileChannel.open(
file,
StandardOpenOption.READ
)
), inflater
)
)
} else {
FileChannel.open(file, StandardOpenOption.READ)
}
}
}.let {
CompletableFuture.completedFuture(it)
}
}
override fun put(key: String, content: ByteBuf): CompletableFuture<Void> {
override fun get(key: String, responseHandle: ResponseHandle, alloc: ByteBufAllocator) {
(digestAlgorithm
?.let(MessageDigest::getInstance)
?.let { md ->
digestString(key.toByteArray(), md)
} ?: key).let { digest ->
val file = root.resolve(digest)
val tmpFile = Files.createTempFile(root, null, ".tmp")
try {
Files.newOutputStream(tmpFile).let {
root.resolve(digest).takeIf(Files::exists)
?.let { file ->
file.takeIf(Files::exists)?.let { file ->
responseHandle.handleEvent(ResponseStreamingEvent.RESPONSE_RECEIVED)
if (compressionEnabled) {
val compositeBuffer = alloc.compositeBuffer()
ByteBufOutputStream(compositeBuffer).use { outputStream ->
InflaterInputStream(Files.newInputStream(file)).use { inputStream ->
val ioBuffer = alloc.buffer(chunkSize)
try {
while (true) {
val read = ioBuffer.writeBytes(inputStream, chunkSize)
val last = read < 0
if (read > 0) {
ioBuffer.readBytes(outputStream, read)
}
if (last) {
compositeBuffer.retain()
outputStream.close()
}
if (compositeBuffer.readableBytes() >= chunkSize || last) {
val chunk = extractChunk(compositeBuffer, alloc)
val evt = if (last) {
ResponseStreamingEvent.LastChunkReceived(chunk)
} else {
ResponseStreamingEvent.ChunkReceived(chunk)
}
responseHandle.handleEvent(evt)
}
if (last) break
}
} finally {
ioBuffer.release()
}
}
}
} else {
responseHandle.handleEvent(
ResponseStreamingEvent.FileReceived(
FileChannel.open(file, StandardOpenOption.READ)
)
)
}
}
} ?: responseHandle.handleEvent(ResponseStreamingEvent.NOT_FOUND)
}
}
override fun put(
key: String,
responseHandle: ResponseHandle,
alloc: ByteBufAllocator
): CompletableFuture<RequestHandle> {
try {
(digestAlgorithm
?.let(MessageDigest::getInstance)
?.let { md ->
digestString(key.toByteArray(), md)
} ?: key).let { digest ->
val file = root.resolve(digest)
val tmpFile = Files.createTempFile(root, null, ".tmp")
val stream = Files.newOutputStream(tmpFile).let {
if (compressionEnabled) {
val deflater = Deflater(compressionLevel)
DeflaterOutputStream(it, deflater)
} else {
it
}
}.use {
JWO.copy(ByteBufInputStream(content), it)
}
Files.move(tmpFile, file, StandardCopyOption.ATOMIC_MOVE)
} catch (t: Throwable) {
Files.delete(tmpFile)
throw t
return CompletableFuture.completedFuture(object : RequestHandle {
override fun handleEvent(evt: RequestStreamingEvent) {
try {
when (evt) {
is RequestStreamingEvent.LastChunkReceived -> {
evt.chunk.readBytes(stream, evt.chunk.readableBytes())
stream.close()
Files.move(tmpFile, file, StandardCopyOption.ATOMIC_MOVE)
responseHandle.handleEvent(ResponseStreamingEvent.RESPONSE_RECEIVED)
}
is RequestStreamingEvent.ChunkReceived -> {
evt.chunk.readBytes(stream, evt.chunk.readableBytes())
}
is RequestStreamingEvent.ExceptionCaught -> {
Files.delete(tmpFile)
stream.close()
}
}
} catch (ex: Throwable) {
responseHandle.handleEvent(ResponseStreamingEvent.ExceptionCaught(ex))
}
}
})
}
} catch (ex: Throwable) {
responseHandle.handleEvent(ResponseStreamingEvent.ExceptionCaught(ex))
return CompletableFuture.failedFuture(ex)
}
return CompletableFuture.completedFuture(null)
}
private val garbageCollector = Thread.ofVirtual().name("file-system-cache-gc").start {
@@ -119,8 +173,8 @@ class FileSystemCache(
/**
* Returns the creation timestamp of the oldest cache entry (if any)
*/
private fun actualGc(now: Instant) : Instant? {
var result :Instant? = null
private fun actualGc(now: Instant): Instant? {
var result: Instant? = null
Files.list(root)
.filter { path ->
JWO.splitExtension(path)
@@ -132,7 +186,7 @@ class FileSystemCache(
val creationTimeStamp = Files.readAttributes(it, BasicFileAttributes::class.java)
.creationTime()
.toInstant()
if(result == null || creationTimeStamp < result) {
if (result == null || creationTimeStamp < result) {
result = creationTimeStamp
}
now > creationTimeStamp.plus(maxAge)
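
Note: this file shows the core API change of the commit. get() no longer returns a readable channel wrapped in a future; the cache pushes ResponseStreamingEvents (RESPONSE_RECEIVED, ChunkReceived, LastChunkReceived, FileReceived, NOT_FOUND, ExceptionCaught) into a caller-supplied ResponseHandle, and put() resolves to a RequestHandle that consumes the request body as RequestStreamingEvents. A self-contained sketch of the contract, with simplified stand-ins for the rbcs-api types (assumption: the real interfaces use Netty ByteBufs and a ByteBufAllocator rather than byte arrays):

    // Simplified stand-ins for the rbcs-api event types.
    sealed interface ResponseEvent {
        object ResponseReceived : ResponseEvent
        object NotFound : ResponseEvent
        class Chunk(val data: ByteArray, val last: Boolean) : ResponseEvent
    }

    fun interface ResponseHandle {
        fun handleEvent(evt: ResponseEvent)
    }

    class ToyCache(private val chunkSize: Int = 0x4000) {
        private val map = HashMap<String, ByteArray>()

        fun put(key: String, value: ByteArray) { map[key] = value }

        // The caller hands over a handle and the cache pushes events into it,
        // instead of handing back a channel for the caller to drain.
        fun get(key: String, handle: ResponseHandle) {
            val value = map[key] ?: return handle.handleEvent(ResponseEvent.NotFound)
            handle.handleEvent(ResponseEvent.ResponseReceived)
            var offset = 0
            while (true) {
                val end = minOf(offset + chunkSize, value.size)
                val last = end == value.size
                handle.handleEvent(ResponseEvent.Chunk(value.copyOfRange(offset, end), last))
                if (last) break
                offset = end
            }
        }
    }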

View File

@@ -12,13 +12,15 @@ data class FileSystemCacheConfiguration(
val digestAlgorithm : String?,
val compressionEnabled: Boolean,
val compressionLevel: Int,
val chunkSize: Int,
) : Configuration.Cache {
override fun materialize() = FileSystemCache(
root ?: Application.builder("rbcs").build().computeCacheDirectory(),
maxAge,
digestAlgorithm,
compressionEnabled,
compressionLevel
compressionLevel,
chunkSize,
)
override fun getNamespaceURI() = RBCS.RBCS_NAMESPACE_URI

View File

@@ -31,13 +31,17 @@ class FileSystemCacheProvider : CacheProvider<FileSystemCacheConfiguration> {
?.let(String::toInt)
?: Deflater.DEFAULT_COMPRESSION
val digestAlgorithm = el.renderAttribute("digest") ?: "MD5"
val chunkSize = el.renderAttribute("chunk-size")
?.let(Integer::decode)
?: 0x4000
return FileSystemCacheConfiguration(
path,
maxAge,
digestAlgorithm,
enableCompression,
compressionLevel
compressionLevel,
chunkSize
)
}
@@ -57,6 +61,7 @@ class FileSystemCacheProvider : CacheProvider<FileSystemCacheConfiguration> {
}?.let {
attr("compression-level", it.toString())
}
attr("chunk-size", chunkSize.toString())
}
result
}
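
Note: chunk-size is parsed with Integer::decode rather than String::toInt, so both decimal and 0x-prefixed hex are accepted, matching the 0x4000 default above. A quick check:

    fun main() {
        check(Integer.decode("0x4000") == 16384)  // the new default chunk size
        check(Integer.decode("16384") == 16384)   // plain decimal still accepted
        // "0x4000".toInt() would throw NumberFormatException
    }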

View File

@@ -1,31 +1,36 @@
package net.woggioni.rbcs.server.cache
import io.netty.buffer.ByteBuf
import net.woggioni.jwo.JWO
import io.netty.buffer.ByteBufAllocator
import net.woggioni.rbcs.api.Cache
import net.woggioni.rbcs.common.ByteBufInputStream
import net.woggioni.rbcs.api.RequestHandle
import net.woggioni.rbcs.api.ResponseHandle
import net.woggioni.rbcs.api.event.RequestStreamingEvent
import net.woggioni.rbcs.api.event.ResponseStreamingEvent
import net.woggioni.rbcs.common.ByteBufOutputStream
import net.woggioni.rbcs.common.RBCS.digestString
import net.woggioni.rbcs.common.contextLogger
import java.nio.channels.Channels
import net.woggioni.rbcs.common.extractChunk
import java.security.MessageDigest
import java.time.Duration
import java.time.Instant
import java.util.concurrent.CompletableFuture
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.PriorityBlockingQueue
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import java.util.zip.Deflater
import java.util.zip.DeflaterOutputStream
import java.util.zip.Inflater
import java.util.zip.InflaterInputStream
import java.util.zip.InflaterOutputStream
class InMemoryCache(
val maxAge: Duration,
val maxSize: Long,
val digestAlgorithm: String?,
val compressionEnabled: Boolean,
val compressionLevel: Int
private val maxAge: Duration,
private val maxSize: Long,
private val digestAlgorithm: String?,
private val compressionEnabled: Boolean,
private val compressionLevel: Int,
private val chunkSize : Int
) : Cache {
companion object {
@@ -35,8 +40,9 @@ class InMemoryCache(
private val size = AtomicLong()
private val map = ConcurrentHashMap<String, ByteBuf>()
private class RemovalQueueElement(val key: String, val value : ByteBuf, val expiry : Instant) : Comparable<RemovalQueueElement> {
private class RemovalQueueElement(val key: String, val value: ByteBuf, val expiry: Instant) :
Comparable<RemovalQueueElement> {
override fun compareTo(other: RemovalQueueElement) = expiry.compareTo(other.expiry)
}
@@ -46,19 +52,17 @@ class InMemoryCache(
private var running = true
private val garbageCollector = Thread.ofVirtual().name("in-memory-cache-gc").start {
while(running) {
val el = removalQueue.take()
while (running) {
val el = removalQueue.poll(1, TimeUnit.SECONDS) ?: continue
val buf = el.value
val now = Instant.now()
if(now > el.expiry) {
if (now > el.expiry) {
val removed = map.remove(el.key, buf)
if(removed) {
if (removed) {
updateSizeAfterRemoval(buf)
//Decrease the reference count for map
buf.release()
}
//Decrease the reference count for removalQueue
buf.release()
} else {
removalQueue.put(el)
Thread.sleep(minOf(Duration.between(now, el.expiry), Duration.ofSeconds(1)))
@@ -66,14 +70,12 @@ class InMemoryCache(
}
}
private fun removeEldest() : Long {
while(true) {
private fun removeEldest(): Long {
while (true) {
val el = removalQueue.take()
val buf = el.value
val removed = map.remove(el.key, buf)
//Decrease the reference count for removalQueue
buf.release()
if(removed) {
if (removed) {
val newSize = updateSizeAfterRemoval(buf)
//Decrease the reference count for map
buf.release()
@@ -82,8 +84,8 @@ class InMemoryCache(
}
}
private fun updateSizeAfterRemoval(removed: ByteBuf) : Long {
return size.updateAndGet { currentSize : Long ->
private fun updateSizeAfterRemoval(removed: ByteBuf): Long {
return size.updateAndGet { currentSize: Long ->
currentSize - removed.readableBytes()
}
}
@@ -93,58 +95,114 @@ class InMemoryCache(
garbageCollector.join()
}
override fun get(key: String) =
(digestAlgorithm
?.let(MessageDigest::getInstance)
?.let { md ->
digestString(key.toByteArray(), md)
} ?: key
).let { digest ->
map[digest]
?.let { value ->
val copy = value.retainedDuplicate()
copy.touch("This has to be released by the caller of the cache")
if (compressionEnabled) {
val inflater = Inflater()
Channels.newChannel(InflaterInputStream(ByteBufInputStream(copy), inflater))
} else {
Channels.newChannel(ByteBufInputStream(copy))
}
override fun get(key: String, responseHandle: ResponseHandle, alloc: ByteBufAllocator) {
try {
(digestAlgorithm
?.let(MessageDigest::getInstance)
?.let { md ->
digestString(key.toByteArray(), md)
} ?: key
).let { digest ->
map[digest]
?.let { value ->
val copy = value.retainedDuplicate()
responseHandle.handleEvent(ResponseStreamingEvent.RESPONSE_RECEIVED)
val output = alloc.compositeBuffer()
if (compressionEnabled) {
try {
val stream = ByteBufOutputStream(output).let {
val inflater = Inflater()
InflaterOutputStream(it, inflater)
}
stream.use { os ->
var readable = copy.readableBytes()
while (true) {
copy.readBytes(os, chunkSize.coerceAtMost(readable))
readable = copy.readableBytes()
val last = readable == 0
if (last) stream.flush()
if (output.readableBytes() >= chunkSize || last) {
val chunk = extractChunk(output, alloc)
val evt = if (last) {
ResponseStreamingEvent.LastChunkReceived(chunk)
} else {
ResponseStreamingEvent.ChunkReceived(chunk)
}
responseHandle.handleEvent(evt)
}
if (last) break
}
}
} finally {
copy.release()
}
} else {
responseHandle.handleEvent(
ResponseStreamingEvent.LastChunkReceived(copy)
)
}
} ?: responseHandle.handleEvent(ResponseStreamingEvent.NOT_FOUND)
}
} catch (ex: Throwable) {
responseHandle.handleEvent(ResponseStreamingEvent.ExceptionCaught(ex))
}
}
override fun put(
key: String,
responseHandle: ResponseHandle,
alloc: ByteBufAllocator
): CompletableFuture<RequestHandle> {
return CompletableFuture.completedFuture(object : RequestHandle {
val buf = alloc.heapBuffer()
val stream = ByteBufOutputStream(buf).let {
if (compressionEnabled) {
val deflater = Deflater(compressionLevel)
DeflaterOutputStream(it, deflater)
} else {
it
}
}.let {
CompletableFuture.completedFuture(it)
}
override fun put(key: String, content: ByteBuf) =
(digestAlgorithm
?.let(MessageDigest::getInstance)
?.let { md ->
digestString(key.toByteArray(), md)
} ?: key).let { digest ->
content.retain()
val value = if (compressionEnabled) {
val deflater = Deflater(compressionLevel)
val buf = content.alloc().buffer()
buf.retain()
DeflaterOutputStream(ByteBufOutputStream(buf), deflater).use { outputStream ->
ByteBufInputStream(content).use { inputStream ->
JWO.copy(inputStream, outputStream)
override fun handleEvent(evt: RequestStreamingEvent) {
when (evt) {
is RequestStreamingEvent.ChunkReceived -> {
evt.chunk.readBytes(stream, evt.chunk.readableBytes())
if (evt is RequestStreamingEvent.LastChunkReceived) {
(digestAlgorithm
?.let(MessageDigest::getInstance)
?.let { md ->
digestString(key.toByteArray(), md)
} ?: key
).let { digest ->
val oldSize = map.put(digest, buf.retain())?.let { old ->
val result = old.readableBytes()
old.release()
result
} ?: 0
val delta = buf.readableBytes() - oldSize
var newSize = size.updateAndGet { currentSize : Long ->
currentSize + delta
}
removalQueue.put(RemovalQueueElement(digest, buf, Instant.now().plus(maxAge)))
while(newSize > maxSize) {
newSize = removeEldest()
}
stream.close()
responseHandle.handleEvent(ResponseStreamingEvent.RESPONSE_RECEIVED)
}
}
}
is RequestStreamingEvent.ExceptionCaught -> {
stream.close()
}
else -> {
}
}
buf
} else {
content
}
val old = map.put(digest, value)
val delta = value.readableBytes() - (old?.readableBytes() ?: 0)
var newSize = size.updateAndGet { currentSize : Long ->
currentSize + delta
}
removalQueue.put(RemovalQueueElement(digest, value.retain(), Instant.now().plus(maxAge)))
while(newSize > maxSize) {
newSize = removeEldest()
}
}.let {
CompletableFuture.completedFuture<Void>(null)
}
})
}
}
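
Note: much of the churn in this file is ByteBuf reference counting: the map and the removal queue each hold a reference to the same buffer and must release it independently, and readers take retained duplicates. A minimal sketch of that ownership discipline, using only stock Netty:

    import io.netty.buffer.Unpooled

    fun main() {
        val buf = Unpooled.copiedBuffer("payload".toByteArray()) // refCnt == 1: the map's reference
        buf.retain()                                             // refCnt == 2: the removal queue's
        val reader = buf.retainedDuplicate()                     // refCnt == 3: independent read indices
        check(reader.readableBytes() == 7)
        reader.release()                                         // the reader is done
        buf.release()                                            // queue drops its reference
        buf.release()                                            // map drops its reference
        check(buf.refCnt() == 0)                                 // memory reclaimed
    }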

View File

@@ -10,13 +10,15 @@ data class InMemoryCacheConfiguration(
val digestAlgorithm : String?,
val compressionEnabled: Boolean,
val compressionLevel: Int,
val chunkSize : Int
) : Configuration.Cache {
override fun materialize() = InMemoryCache(
maxAge,
maxSize,
digestAlgorithm,
compressionEnabled,
compressionLevel
compressionLevel,
chunkSize
)
override fun getNamespaceURI() = RBCS.RBCS_NAMESPACE_URI

View File

@@ -31,13 +31,16 @@ class InMemoryCacheProvider : CacheProvider<InMemoryCacheConfiguration> {
?.let(String::toInt)
?: Deflater.DEFAULT_COMPRESSION
val digestAlgorithm = el.renderAttribute("digest") ?: "MD5"
val chunkSize = el.renderAttribute("chunk-size")
?.let(Integer::decode)
?: 0x4000
return InMemoryCacheConfiguration(
maxAge,
maxSize,
digestAlgorithm,
enableCompression,
compressionLevel
compressionLevel,
chunkSize
)
}
@@ -57,6 +60,7 @@ class InMemoryCacheProvider : CacheProvider<InMemoryCacheConfiguration> {
}?.let {
attr("compression-level", it.toString())
}
attr("chunk-size", chunkSize.toString())
}
result
}

View File

@@ -124,7 +124,7 @@ object Parser {
val writeIdleTimeout = child.renderAttribute("write-idle-timeout")
?.let(Duration::parse) ?: Duration.of(60, ChronoUnit.SECONDS)
val maxRequestSize = child.renderAttribute("max-request-size")
?.let(String::toInt) ?: 67108864
?.let(Integer::decode) ?: 0x4000000
connection = Configuration.Connection(
readTimeout,
writeTimeout,
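
Note: the max-request-size default is unchanged here, only its spelling: 0x4000000 is 67108864 (64 MiB). Switching to Integer::decode keeps decimal configs working while allowing hex:

    fun main() {
        check(0x4000000 == 67108864)  // 64 MiB: same default, new spelling
        check(Integer.decode("0x4000000") == 67108864)
        check(Integer.decode("67108864") == 67108864)
    }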

View File

@@ -2,34 +2,66 @@ package net.woggioni.rbcs.server.handler
import io.netty.buffer.Unpooled
import io.netty.channel.ChannelFutureListener
import io.netty.channel.ChannelHandler
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.DefaultFileRegion
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.handler.codec.http.DefaultFullHttpResponse
import io.netty.handler.codec.http.DefaultHttpContent
import io.netty.handler.codec.http.DefaultHttpResponse
import io.netty.handler.codec.http.FullHttpRequest
import io.netty.handler.codec.http.DefaultLastHttpContent
import io.netty.handler.codec.http.HttpContent
import io.netty.handler.codec.http.HttpHeaderNames
import io.netty.handler.codec.http.HttpHeaderValues
import io.netty.handler.codec.http.HttpMethod
import io.netty.handler.codec.http.HttpObject
import io.netty.handler.codec.http.HttpRequest
import io.netty.handler.codec.http.HttpResponseStatus
import io.netty.handler.codec.http.HttpUtil
import io.netty.handler.codec.http.LastHttpContent
import io.netty.handler.stream.ChunkedNioStream
import net.woggioni.rbcs.api.Cache
import net.woggioni.rbcs.api.RequestHandle
import net.woggioni.rbcs.api.ResponseHandle
import net.woggioni.rbcs.api.event.RequestStreamingEvent
import net.woggioni.rbcs.api.event.ResponseStreamingEvent
import net.woggioni.rbcs.common.contextLogger
import net.woggioni.rbcs.common.debug
import net.woggioni.rbcs.server.debug
import net.woggioni.rbcs.server.warn
import java.nio.channels.FileChannel
import java.nio.file.Path
import java.util.concurrent.CompletableFuture
@ChannelHandler.Sharable
class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
SimpleChannelInboundHandler<FullHttpRequest>() {
SimpleChannelInboundHandler<HttpObject>() {
private val log = contextLogger()
override fun channelRead0(ctx: ChannelHandlerContext, msg: FullHttpRequest) {
override fun channelRead0(ctx: ChannelHandlerContext, msg: HttpObject) {
when(msg) {
is HttpRequest -> handleRequest(ctx, msg)
is HttpContent -> handleContent(msg)
}
}
private var requestHandle : CompletableFuture<RequestHandle?> = CompletableFuture.completedFuture(null)
private fun handleContent(content : HttpContent) {
content.retain()
requestHandle.thenAccept { handle ->
handle?.let {
val evt = if(content is LastHttpContent) {
RequestStreamingEvent.LastChunkReceived(content.content())
} else {
RequestStreamingEvent.ChunkReceived(content.content())
}
it.handleEvent(evt)
content.release()
} ?: content.release()
}
}
private fun handleRequest(ctx : ChannelHandlerContext, msg : HttpRequest) {
val keepAlive: Boolean = HttpUtil.isKeepAlive(msg)
val method = msg.method()
if (method === HttpMethod.GET) {
@@ -42,54 +74,55 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
return
}
if (serverPrefix == prefix) {
cache.get(key).thenApply { channel ->
if(channel != null) {
log.debug(ctx) {
"Cache hit for key '$key'"
}
val response = DefaultHttpResponse(msg.protocolVersion(), HttpResponseStatus.OK)
response.headers()[HttpHeaderNames.CONTENT_TYPE] = HttpHeaderValues.APPLICATION_OCTET_STREAM
if (!keepAlive) {
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE)
response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.IDENTITY)
} else {
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE)
val responseHandle = ResponseHandle { evt ->
when (evt) {
is ResponseStreamingEvent.ResponseReceived -> {
val response = DefaultHttpResponse(msg.protocolVersion(), HttpResponseStatus.OK)
response.headers()[HttpHeaderNames.CONTENT_TYPE] = HttpHeaderValues.APPLICATION_OCTET_STREAM
if (!keepAlive) {
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE)
} else {
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE)
}
response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED)
ctx.writeAndFlush(response)
}
ctx.write(response)
when (channel) {
is FileChannel -> {
val content = DefaultFileRegion(channel, 0, channel.size())
if (keepAlive) {
ctx.write(content)
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT.retainedDuplicate())
} else {
ctx.writeAndFlush(content)
.addListener(ChannelFutureListener.CLOSE)
}
}
else -> {
val content = ChunkedNioStream(channel)
if (keepAlive) {
ctx.write(content).addListener {
content.close()
}
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT.retainedDuplicate())
} else {
ctx.writeAndFlush(content)
.addListener(ChannelFutureListener.CLOSE)
}
is ResponseStreamingEvent.LastChunkReceived -> {
val channelFuture = ctx.writeAndFlush(DefaultLastHttpContent(evt.chunk))
if (!keepAlive) {
channelFuture
.addListener(ChannelFutureListener.CLOSE)
}
}
} else {
log.debug(ctx) {
"Cache miss for key '$key'"
is ResponseStreamingEvent.ChunkReceived -> {
ctx.writeAndFlush(DefaultHttpContent(evt.chunk))
}
is ResponseStreamingEvent.ExceptionCaught -> {
ctx.fireExceptionCaught(evt.exception)
}
is ResponseStreamingEvent.NotFound -> {
val response = DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.NOT_FOUND)
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = 0
ctx.writeAndFlush(response)
}
is ResponseStreamingEvent.FileReceived -> {
val content = DefaultFileRegion(evt.file, 0, evt.file.size())
if (keepAlive) {
ctx.write(content)
ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT.retainedDuplicate())
} else {
ctx.writeAndFlush(content)
.addListener(ChannelFutureListener.CLOSE)
}
}
val response = DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.NOT_FOUND)
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = 0
ctx.writeAndFlush(response)
}
}.whenComplete { _, ex -> ex?.let(ctx::fireExceptionCaught) }
}
cache.get(key, responseHandle, ctx.alloc())
} else {
log.warn(ctx) {
"Got request for unhandled path '${msg.uri()}'"
@@ -107,15 +140,32 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
log.debug(ctx) {
"Added value for key '$key' to build cache"
}
cache.put(key, msg.content()).thenRun {
val response = DefaultFullHttpResponse(
msg.protocolVersion(), HttpResponseStatus.CREATED,
Unpooled.copiedBuffer(key.toByteArray())
)
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = response.content().readableBytes()
ctx.writeAndFlush(response)
}.whenComplete { _, ex ->
val responseHandle = ResponseHandle { evt ->
when (evt) {
is ResponseStreamingEvent.ResponseReceived -> {
val response = DefaultFullHttpResponse(
msg.protocolVersion(), HttpResponseStatus.CREATED,
Unpooled.copiedBuffer(key.toByteArray())
)
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = response.content().readableBytes()
ctx.writeAndFlush(response)
this.requestHandle = CompletableFuture.completedFuture(null)
}
is ResponseStreamingEvent.ChunkReceived -> {
evt.chunk.release()
}
is ResponseStreamingEvent.ExceptionCaught -> {
ctx.fireExceptionCaught(evt.exception)
}
else -> {}
}
}
this.requestHandle = cache.put(key, responseHandle, ctx.alloc()).exceptionally { ex ->
ctx.fireExceptionCaught(ex)
null
}.also {
log.debug { "Replacing request handle with $it"}
}
} else {
log.warn(ctx) {
@@ -125,9 +175,12 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
response.headers()[HttpHeaderNames.CONTENT_LENGTH] = "0"
ctx.writeAndFlush(response)
}
} else if(method == HttpMethod.TRACE) {
} else if (method == HttpMethod.TRACE) {
val replayedRequestHead = ctx.alloc().buffer()
replayedRequestHead.writeCharSequence("TRACE ${Path.of(msg.uri())} ${msg.protocolVersion().text()}\r\n", Charsets.US_ASCII)
replayedRequestHead.writeCharSequence(
"TRACE ${Path.of(msg.uri())} ${msg.protocolVersion().text()}\r\n",
Charsets.US_ASCII
)
msg.headers().forEach { (key, value) ->
replayedRequestHead.apply {
writeCharSequence(key, Charsets.US_ASCII)
@@ -137,16 +190,24 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
}
}
replayedRequestHead.writeCharSequence("\r\n", Charsets.US_ASCII)
val requestBody = msg.content()
requestBody.retain()
val responseBody = ctx.alloc().compositeBuffer(2).apply {
addComponents(true, replayedRequestHead)
addComponents(true, requestBody)
this.requestHandle = CompletableFuture.completedFuture(RequestHandle { evt ->
when(evt) {
is RequestStreamingEvent.LastChunkReceived -> {
ctx.writeAndFlush(DefaultLastHttpContent(evt.chunk.retain()))
this.requestHandle = CompletableFuture.completedFuture(null)
}
is RequestStreamingEvent.ChunkReceived -> ctx.writeAndFlush(DefaultHttpContent(evt.chunk.retain()))
is RequestStreamingEvent.ExceptionCaught -> ctx.fireExceptionCaught(evt.exception)
else -> {
}
}
}).also {
log.debug { "Replacing request handle with $it"}
}
val response = DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.OK, responseBody)
val response = DefaultHttpResponse(msg.protocolVersion(), HttpResponseStatus.OK)
response.headers().apply {
set(HttpHeaderNames.CONTENT_TYPE, "message/http")
set(HttpHeaderNames.CONTENT_LENGTH, responseBody.readableBytes())
}
ctx.writeAndFlush(response)
} else {
@@ -158,4 +219,11 @@ class ServerHandler(private val cache: Cache, private val serverPrefix: Path) :
ctx.writeAndFlush(response)
}
}
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
requestHandle.thenAccept { handle ->
handle?.handleEvent(RequestStreamingEvent.ExceptionCaught(cause))
}
super.exceptionCaught(ctx, cause)
}
}
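
Note: the PUT path above hinges on the mutable requestHandle future: body frames can arrive before cache.put(...) has produced a RequestHandle, so handleContent chains each frame onto the future instead of dispatching it directly. When several callbacks are registered on one still-pending CompletableFuture their relative execution order is unspecified, which is one plausible source of the "unreliable" in the commit title. A stripped-down sketch of the indirection (hypothetical names):

    import java.util.concurrent.CompletableFuture

    fun interface RequestHandle {
        fun handleEvent(chunk: ByteArray)
    }

    class PutFlow {
        // Starts out completed-with-null, like the field in ServerHandler.
        private var requestHandle: CompletableFuture<RequestHandle?> =
            CompletableFuture.completedFuture<RequestHandle?>(null)

        fun onRequestHead(openPut: () -> CompletableFuture<RequestHandle?>) {
            requestHandle = openPut()
        }

        fun onContent(chunk: ByteArray) {
            // If the handle is not ready yet, the callback runs once it is;
            // a null handle means nobody consumes this body, so drop the chunk.
            requestHandle.thenAccept { handle -> handle?.handleEvent(chunk) }
        }
    }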

View File

@@ -1,10 +1,11 @@
package net.woggioni.rbcs.server.throttling
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.ChannelInboundHandlerAdapter
import io.netty.handler.codec.http.DefaultFullHttpResponse
import io.netty.handler.codec.http.HttpContent
import io.netty.handler.codec.http.HttpHeaderNames
import io.netty.handler.codec.http.HttpRequest
import io.netty.handler.codec.http.HttpResponseStatus
import io.netty.handler.codec.http.HttpVersion
import net.woggioni.rbcs.api.Configuration
@@ -18,7 +19,6 @@ import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeUnit
@Sharable
class ThrottlingHandler(cfg: Configuration) : ChannelInboundHandlerAdapter() {
private companion object {
@@ -30,6 +30,8 @@ class ThrottlingHandler(cfg: Configuration) : ChannelInboundHandlerAdapter() {
private val connectionConfiguration = cfg.connection
private var queuedContent : MutableList<HttpContent>? = null
/**
* If the suggested waiting time from the bucket is lower than this
* amount, then the server will simply wait by itself before sending a response
@@ -41,25 +43,34 @@ class ThrottlingHandler(cfg: Configuration) : ChannelInboundHandlerAdapter() {
connectionConfiguration.writeIdleTimeout
).dividedBy(2)
override fun channelRead(ctx: ChannelHandlerContext, msg: Any) {
val buckets = mutableListOf<Bucket>()
val user = ctx.channel().attr(RemoteBuildCacheServer.userAttribute).get()
if (user != null) {
bucketManager.getBucketByUser(user)?.let(buckets::addAll)
}
val groups = ctx.channel().attr(RemoteBuildCacheServer.groupAttribute).get() ?: emptySet()
if (groups.isNotEmpty()) {
groups.forEach { group ->
bucketManager.getBucketByGroup(group)?.let(buckets::add)
if(msg is HttpRequest) {
val buckets = mutableListOf<Bucket>()
val user = ctx.channel().attr(RemoteBuildCacheServer.userAttribute).get()
if (user != null) {
bucketManager.getBucketByUser(user)?.let(buckets::addAll)
}
}
if (user == null && groups.isEmpty()) {
bucketManager.getBucketByAddress(ctx.channel().remoteAddress() as InetSocketAddress)?.let(buckets::add)
}
if (buckets.isEmpty()) {
return super.channelRead(ctx, msg)
val groups = ctx.channel().attr(RemoteBuildCacheServer.groupAttribute).get() ?: emptySet()
if (groups.isNotEmpty()) {
groups.forEach { group ->
bucketManager.getBucketByGroup(group)?.let(buckets::add)
}
}
if (user == null && groups.isEmpty()) {
bucketManager.getBucketByAddress(ctx.channel().remoteAddress() as InetSocketAddress)?.let(buckets::add)
}
if (buckets.isEmpty()) {
super.channelRead(ctx, msg)
} else {
handleBuckets(buckets, ctx, msg, true)
}
ctx.channel().id()
} else if(msg is HttpContent) {
queuedContent?.add(msg) ?: super.channelRead(ctx, msg)
} else {
handleBuckets(buckets, ctx, msg, true)
super.channelRead(ctx, msg)
}
}
@@ -73,9 +84,16 @@ class ThrottlingHandler(cfg: Configuration) : ChannelInboundHandlerAdapter() {
}
if (nextAttempt < 0) {
super.channelRead(ctx, msg)
queuedContent?.let {
for(content in it) {
super.channelRead(ctx, content)
}
queuedContent = null
}
} else {
val waitDuration = Duration.of(LongMath.ceilDiv(nextAttempt, 100_000_000L) * 100L, ChronoUnit.MILLIS)
if (delayResponse && waitDuration < waitThreshold) {
this.queuedContent = mutableListOf()
ctx.executor().schedule({
handleBuckets(buckets, ctx, msg, false)
}, waitDuration.toMillis(), TimeUnit.MILLISECONDS)
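
Note: the throttling change parks HttpContent frames in queuedContent while a delayed request waits for its bucket, then replays them in arrival order once the request is admitted. Because the handler now carries that per-connection state, the server builds a new ThrottlingHandler(cfg) per pipeline (see the RemoteBuildCacheServer hunk above), although the class still carries the @Sharable annotation. The pattern in isolation:

    // Minimal queue-then-replay sketch of the new behaviour (names simplified;
    // the real class consults token buckets, not a ready-made decision).
    class ReplayQueue<T>(private val deliver: (T) -> Unit) {
        private var queued: MutableList<T>? = null

        fun onDecisionPending() { queued = mutableListOf() }  // start parking frames

        fun onFrame(frame: T) {
            queued?.add(frame) ?: deliver(frame)              // park or pass through
        }

        fun onAdmitted() {
            queued?.forEach(deliver)                          // replay in arrival order
            queued = null
        }
    }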

View File

@@ -39,7 +39,7 @@
<xs:attribute name="idle-timeout" type="xs:duration" use="optional" default="PT30S"/>
<xs:attribute name="read-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
<xs:attribute name="write-idle-timeout" type="xs:duration" use="optional" default="PT60S"/>
<xs:attribute name="max-request-size" type="xs:unsignedInt" use="optional" default="67108864"/>
<xs:attribute name="max-request-size" type="rbcs:byteSize" use="optional" default="0x4000000"/>
</xs:complexType>
<xs:complexType name="eventExecutorType">
@@ -52,10 +52,11 @@
<xs:complexContent>
<xs:extension base="rbcs:cacheType">
<xs:attribute name="max-age" type="xs:duration" default="P1D"/>
<xs:attribute name="max-size" type="xs:token" default="0x1000000"/>
<xs:attribute name="max-size" type="rbcs:byteSize" default="0x1000000"/>
<xs:attribute name="digest" type="xs:token" default="MD5"/>
<xs:attribute name="enable-compression" type="xs:boolean" default="true"/>
<xs:attribute name="compression-level" type="xs:byte" default="-1"/>
<xs:attribute name="chunk-size" type="rbcs:byteSize" default="0x4000"/>
</xs:extension>
</xs:complexContent>
</xs:complexType>
@@ -68,6 +69,7 @@
<xs:attribute name="digest" type="xs:token" default="MD5"/>
<xs:attribute name="enable-compression" type="xs:boolean" default="true"/>
<xs:attribute name="compression-level" type="xs:byte" default="-1"/>
<xs:attribute name="chunk-size" type="rbcs:byteSize" default="0x4000"/>
</xs:extension>
</xs:complexContent>
</xs:complexType>
@@ -220,5 +222,10 @@
<xs:attribute name="port" type="xs:unsignedShort" use="required"/>
</xs:complexType>
<xs:simpleType name="byteSize">
<xs:restriction base="xs:token">
<xs:pattern value="(0x[a-f0-9]+|[0-9]+)"/>
</xs:restriction>
</xs:simpleType>
</xs:schema>
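
Note: the new byteSize simple type admits plain decimal or lowercase 0x-prefixed hex, which is slightly stricter than the Integer.decode call that ultimately parses the value:

    fun main() {
        val byteSize = Regex("(0x[a-f0-9]+|[0-9]+)")
        check(byteSize.matches("0x4000000"))
        check(byteSize.matches("67108864"))
        check(!byteSize.matches("0X4000000"))            // uppercase prefix rejected by the schema
        check(Integer.decode("0X4000000") == 0x4000000)  // ...though the parser would accept it
    }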

View File

@@ -47,11 +47,13 @@ abstract class AbstractBasicAuthServerTest : AbstractServerTest() {
),
users.asSequence().map { it.name to it}.toMap(),
sequenceOf(writersGroup, readersGroup).map { it.name to it}.toMap(),
FileSystemCacheConfiguration(this.cacheDir,
FileSystemCacheConfiguration(
this.cacheDir,
maxAge = Duration.ofSeconds(3600 * 24),
digestAlgorithm = "MD5",
compressionLevel = Deflater.DEFAULT_COMPRESSION,
compressionEnabled = false
compressionEnabled = false,
chunkSize = 0x1000
),
Configuration.BasicAuthentication(),
null,

View File

@@ -156,7 +156,8 @@ abstract class AbstractTlsServerTest : AbstractServerTest() {
maxAge = Duration.ofSeconds(3600 * 24),
compressionEnabled = true,
compressionLevel = Deflater.DEFAULT_COMPRESSION,
digestAlgorithm = "MD5"
digestAlgorithm = "MD5",
chunkSize = 0x1000
),
// InMemoryCacheConfiguration(
// maxAge = Duration.ofSeconds(3600 * 24),

View File

@@ -86,7 +86,7 @@ class BasicAuthServerTest : AbstractBasicAuthServerTest() {
@Test
@Order(4)
fun putAsAWriterUser() {
val client: HttpClient = HttpClient.newHttpClient()
val client: HttpClient = HttpClient.newBuilder().version(HttpClient.Version.HTTP_1_1).build()
val (key, value) = keyValuePair
val user = cfg.users.values.find {

View File

@@ -52,7 +52,8 @@ class NoAuthServerTest : AbstractServerTest() {
compressionEnabled = true,
digestAlgorithm = "MD5",
compressionLevel = Deflater.DEFAULT_COMPRESSION,
maxSize = 0x1000000
maxSize = 0x1000000,
chunkSize = 0x1000
),
null,
null,
@@ -80,7 +81,7 @@ class NoAuthServerTest : AbstractServerTest() {
@Test
@Order(1)
fun putWithNoAuthorizationHeader() {
val client: HttpClient = HttpClient.newHttpClient()
val client: HttpClient = HttpClient.newBuilder().version(HttpClient.Version.HTTP_1_1).build()
val (key, value) = keyValuePair
val requestBuilder = newRequestBuilder(key)

View File

@@ -11,7 +11,7 @@
idle-timeout="PT30M"
max-request-size="101325"/>
<event-executor use-virtual-threads="false"/>
<cache xs:type="rbcs:fileSystemCacheType" path="/tmp/rbcs" max-age="P7D"/>
<cache xs:type="rbcs:fileSystemCacheType" path="/tmp/rbcs" max-age="P7D" chunk-size="0xa910"/>
<authentication>
<none/>
</authentication>

View File

@@ -13,7 +13,7 @@
read-timeout="PT5M"
write-timeout="PT5M"/>
<event-executor use-virtual-threads="true"/>
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" max-size="16777216" compression-mode="deflate">
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" max-size="16777216" compression-mode="deflate" chunk-size="123">
<server host="memcached" port="11211"/>
</cache>
<authorization>

View File

@@ -12,7 +12,7 @@
idle-timeout="PT30M"
max-request-size="101325"/>
<event-executor use-virtual-threads="false"/>
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" max-size="101325" digest="SHA-256">
<cache xs:type="rbcs-memcache:memcacheCacheType" max-age="P7D" max-size="101325" digest="SHA-256" chunk-size="456">
<server host="127.0.0.1" port="11211" max-connections="10" connection-timeout="PT20S"/>
</cache>
<authentication>

View File

@@ -11,7 +11,7 @@
idle-timeout="PT30M"
max-request-size="4096"/>
<event-executor use-virtual-threads="false"/>
<cache xs:type="rbcs:inMemoryCacheType" max-age="P7D"/>
<cache xs:type="rbcs:inMemoryCacheType" max-age="P7D" chunk-size="0xa91f"/>
<authorization>
<users>
<user name="user1" password="password1">