
log2n compilation error fix, cache digest calculation fix

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1725262 13f79535-47bb-0310-9956-ffa450edef68
Author: Stefan Eissing
Date:   2016-01-18 13:10:27 +00:00
Parent: bda008ac8d
Commit: 32ebc57fd1
6 changed files with 102 additions and 100 deletions

CHANGES

@@ -1,6 +1,10 @@
  -*- coding: utf-8 -*-
 Changes with Apache 2.5.0
+  *) mod_http2: fixed compile issues re cc's with builtin log2n, fixed cache
+     digest calculation. [Stefan Eissing]
      pushed resources are kept. See directive H2PushDiarySize for managing this.
   *) core: Add expression support to SetHandler.
      [Eric Covener]

modules/http2/h2_filter.c

@@ -29,6 +29,7 @@
 #include "h2_task.h"
 #include "h2_stream.h"
 #include "h2_stream_set.h"
+#include "h2_request.h"
 #include "h2_response.h"
 #include "h2_session.h"
 #include "h2_util.h"
@@ -203,6 +204,7 @@ static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb)
     h2_stream *stream = sos->stream;
     h2_session *session = stream->session;
     h2_mplx *mplx = session->mplx;
+    h2_push_diary *diary;
     apr_status_t status;
 
     if (!bb) {
@@ -225,21 +227,24 @@ static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb)
     bbout(" \"pushes_submitted\": %d,\n", session->pushes_submitted);
     bbout(" \"pushes_reset\": %d,\n", session->pushes_reset);
-    if (session->push_diary) {
+    diary = session->push_diary;
+    if (diary) {
         const char *data;
         const char *base64_digest;
         apr_size_t len;
-        status = h2_push_diary_digest_get(session->push_diary, stream->pool, 1024, &data, &len);
+        status = h2_push_diary_digest_get(diary, stream->pool, 256,
+                                          stream->request->authority, &data, &len);
         if (status == APR_SUCCESS) {
             base64_digest = h2_util_base64url_encode(data, len, stream->pool);
             bbout(" \"cache_digest\": \"%s\",\n", base64_digest);
         }
         /* try the reverse for testing purposes */
-        status = h2_push_diary_digest_set(session->push_diary, data, len);
+        status = h2_push_diary_digest_set(diary, stream->request->authority, data, len);
         if (status == APR_SUCCESS) {
-            status = h2_push_diary_digest_get(session->push_diary, stream->pool, 1024, &data, &len);
+            status = h2_push_diary_digest_get(diary, stream->pool, 256,
+                                              stream->request->authority, &data, &len);
             if (status == APR_SUCCESS) {
                 base64_digest = h2_util_base64url_encode(data, len, stream->pool);
                 bbout(" \"cache_digest^2\": \"%s\",\n", base64_digest);

modules/http2/h2_push.c

@@ -468,6 +468,9 @@ void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_en
  * push diary
  ******************************************************************************/
+#define GCSLOG_LEVEL APLOG_TRACE1
 typedef struct h2_push_diary_entry {
     apr_uint64_t hash;
 } h2_push_diary_entry;
@@ -482,22 +485,25 @@ static void sha256_update(SHA256_CTX *ctx, const char *s)
 static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
 {
     SHA256_CTX sha256;
-    union {
-        unsigned char hash[SHA256_DIGEST_LENGTH];
-        apr_uint64_t val;
-    } ctx;
+    apr_uint64_t val;
+    unsigned char hash[SHA256_DIGEST_LENGTH];
+    int i;
 
     SHA256_Init(&sha256);
     sha256_update(&sha256, push->req->scheme);
     sha256_update(&sha256, "://");
     sha256_update(&sha256, push->req->authority);
     sha256_update(&sha256, push->req->path);
-    SHA256_Final(ctx.hash, &sha256);
-    *phash = ctx.val;
+    SHA256_Final(hash, &sha256);
+
+    val = 0;
+    for (i = 0; i != sizeof(val); ++i)
+        val = val * 256 + hash[i];
+    *phash = val >> (64 - diary->mask_bits);
 }
 
 #endif
 
 static unsigned int val_apr_hash(const char *str)
 {
     apr_ssize_t len = strlen(str);
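The fix above replaces the type-punning union with an explicit byte fold: the first 8 bytes of the SHA-256 digest are accumulated big-endian into a 64-bit value, and only the top diary->mask_bits are kept, so the computed hash no longer depends on host byte order. A minimal standalone sketch of that calculation, assuming OpenSSL's one-shot SHA256(); the URL and the bit count are illustrative, not taken from the patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

/* Fold the first 8 digest bytes big-endian into a 64-bit value and keep
 * only the top mask_bits, mirroring the fixed calc_sha256_hash(). */
static uint64_t url_hash64(const char *url, unsigned int mask_bits)
{
    unsigned char hash[SHA256_DIGEST_LENGTH];
    uint64_t val = 0;
    size_t i;

    SHA256((const unsigned char *)url, strlen(url), hash);
    for (i = 0; i < sizeof(val); ++i) {
        val = val * 256 + hash[i];
    }
    return (mask_bits >= 64)? val : (val >> (64 - mask_bits));
}

int main(void)
{
    printf("%016llx\n",
           (unsigned long long)url_hash64("https://example.org/index.html", 64));
    return 0;
}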
@@ -521,6 +527,7 @@ static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *pu
 static apr_int32_t ceil_power_of_2(apr_int32_t n)
 {
+    if (n <= 2) return 2;
     --n;
     n |= n >> 1;
     n |= n >> 2;
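ceil_power_of_2() rounds up to the next power of two with the usual bit-smearing idiom; the added guard keeps n = 0, 1 or 2 from misbehaving. The remaining shifts are not visible in the hunk, but the idiom looks roughly like this (a sketch, not the file's exact body):

#include <stdint.h>

static int32_t ceil_pow2(int32_t n)
{
    if (n <= 2) return 2;    /* the added guard: never return less than 2 */
    --n;                     /* so exact powers of two map to themselves */
    n |= n >> 1;             /* smear the highest set bit into all lower bits */
    n |= n >> 2;
    n |= n >> 4;
    n |= n >> 8;
    n |= n >> 16;
    return n + 1;            /* e.g. 5 -> 8, 64 -> 64, 100 -> 128 */
}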
@@ -545,7 +552,7 @@ static h2_push_diary *diary_create(apr_pool_t *p, h2_push_digest_type dtype,
          * the full 64 bits.
          * If we set the diary via a compressed golomb set, we have less
          * relevant bits and need to use a smaller mask. */
-        diary->mask = 0xffffffffffffffffu;
+        diary->mask_bits = 64;
 
         /* grows by doubling, start with a power of 2 */
         diary->entries = apr_array_make(p, 16, sizeof(h2_push_diary_entry));
@@ -578,7 +585,6 @@ static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash)
         int i;
 
         /* search from the end, where the last accessed digests are */
-        hash &= diary->mask;
         for (i = diary->entries->nelts-1; i >= 0; --i) {
             e = &APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry);
             if (e->hash == hash) {
@@ -618,9 +624,8 @@ static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
         ne = move_to_last(diary, 0);
         *ne = *e;
     }
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, diary->entries->pool,
-                  "push_diary_append: masking %lx", ne->hash);
-    ne->hash &= diary->mask;
+    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
+                  "push_diary_append: %lx", ne->hash);
 }
 
 apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
@@ -639,12 +644,12 @@ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t
         session->push_diary->dcalc(session->push_diary, &e.hash, push);
         idx = h2_push_diary_find(session->push_diary, e.hash);
         if (idx >= 0) {
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+            ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
                           "push_diary_update: already there PUSH %s", push->req->path);
             move_to_last(session->push_diary, idx);
         }
         else {
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+            ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
                           "push_diary_update: adding PUSH %s", push->req->path);
             if (!npushes) {
                 npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*));
@@ -667,7 +672,8 @@ apr_array_header_t *h2_push_collect_update(h2_stream *stream,
     apr_status_t status;
 
     if (cache_digest && session->push_diary) {
-        status = h2_push_diary_digest64_set(session->push_diary, cache_digest, stream->pool);
+        status = h2_push_diary_digest64_set(session->push_diary, req->authority,
+                                            cache_digest, stream->pool);
         if (status != APR_SUCCESS) {
             ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
                           "h2_session(%ld): push diary set from Cache-Digest: %s",
@@ -678,8 +684,8 @@ apr_array_header_t *h2_push_collect_update(h2_stream *stream,
     return h2_push_diary_update(stream->session, pushes);
 }
 
-/* log2(n) iff n is a power of 2 */
-static unsigned char log2(apr_uint32_t n)
+/* h2_log2(n) iff n is a power of 2 */
+static unsigned char h2_log2(apr_uint32_t n)
 {
     int lz = 0;
     if (!n) {
@@ -708,17 +714,7 @@ static unsigned char log2(apr_uint32_t n)
     return 31 - lz;
 }
 
-/* log2(n) iff n is a power of 2 */
-static unsigned char log2_64(apr_uint64_t n)
-{
-    apr_uint32_t i = (n & 0xffffffffu);
-    if (i) {
-        return log2(i);
-    }
-    return log2((apr_uint32_t)(n >> 32)) + 32;
-}
-
-static apr_int32_t log2inv(unsigned char log2)
+static apr_int32_t h2_log2inv(unsigned char log2)
 {
     return log2? (1 << log2) : 1;
 }
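The rename addresses the compile errors named in the commit message: log2 is declared in <math.h> and available as a compiler builtin, so a static function of the same name clashes on some compilers, hence the h2_ prefix; the 64-bit variant is dropped because mask_bits is now stored directly. A simplified equivalent of the prefixed helpers (the patch itself counts leading zeros rather than looping):

#include <assert.h>
#include <stdint.h>

/* valid for n that is a power of two, as the original comment notes */
static unsigned char my_log2(uint32_t n)
{
    unsigned char r = 0;
    while (n > 1) {
        n >>= 1;
        ++r;
    }
    return r;
}

static uint32_t my_log2inv(unsigned char log2n)
{
    return log2n? (1u << log2n) : 1;
}

int main(void)
{
    assert(my_log2(64) == 6 && my_log2(1) == 0);
    assert(my_log2inv(6) == 64 && my_log2inv(0) == 1);
    return 0;
}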
@@ -728,7 +724,7 @@ typedef struct {
     h2_push_diary *diary;
     unsigned char log2p;
     apr_uint32_t mask_bits;
-    apr_uint64_t mask;
+    apr_uint32_t delta_bits;
     apr_uint32_t fixed_bits;
     apr_uint64_t fixed_mask;
     apr_pool_t *pool;
@@ -791,7 +787,7 @@ static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval)
     delta = pval - encoder->last;
     encoder->last = pval;
     flex_bits = (delta >> encoder->fixed_bits);
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, encoder->pool,
+    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, encoder->pool,
                   "h2_push_diary_enc: val=%lx, delta=%lx flex_bits=%ld, "
                   "fixed_bits=%d, fixed_val=%lx",
                   pval, delta, flex_bits, encoder->fixed_bits, delta&encoder->fixed_mask);
@@ -826,11 +822,11 @@ static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval)
  * @param plen on successful return, the length of the binary data
  */
 apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
-                                      apr_uint32_t maxP,
+                                      apr_uint32_t maxP, const char *authority,
                                       const char **pdata, apr_size_t *plen)
 {
     apr_size_t nelts, N, i;
-    unsigned char log2n, log2pmax, mask_bits;
+    unsigned char log2n, log2pmax;
     gset_encoder encoder;
     apr_uint64_t *hashes;
     apr_size_t hash_count;
@@ -842,26 +838,19 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
         return APR_ENOTIMPL;
     }
     N = ceil_power_of_2(nelts);
-    log2n = log2(N);
-    mask_bits = log2_64(diary->mask + 1);
-    if (mask_bits <= log2n) {
-        /* uhm, what? */
-        return APR_ENOTIMPL;
-    }
+    log2n = h2_log2(N);
 
     /* Now log2p is the max number of relevant bits, so that
      * log2p + log2n == mask_bits. We can uise a lower log2p
      * and have a shorter set encoding...
      */
-    log2pmax = log2(ceil_power_of_2(maxP));
+    log2pmax = h2_log2(ceil_power_of_2(maxP));
 
     memset(&encoder, 0, sizeof(encoder));
     encoder.diary = diary;
-    encoder.log2p = H2MIN(mask_bits - log2n, log2pmax);
+    encoder.log2p = H2MIN(diary->mask_bits - log2n, log2pmax);
     encoder.mask_bits = log2n + encoder.log2p;
-    encoder.mask = 1;
-    encoder.mask = (encoder.mask << encoder.mask_bits) - 1;
+    encoder.delta_bits = diary->mask_bits - encoder.mask_bits;
     encoder.fixed_bits = encoder.log2p;
     encoder.fixed_mask = 1;
     encoder.fixed_mask = (encoder.fixed_mask << encoder.fixed_bits) - 1;
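With the 64-bit mask replaced by a bit count, the encoder parameters follow directly from diary->mask_bits, the rounded-up entry count and the maxP cap. A worked example with assumed values (40 diary entries, the default mask_bits of 64, and maxP = 256 as now passed from the status handler):

#include <stdio.h>

#define H2MIN(a, b) ((a) < (b)? (a) : (b))

int main(void)
{
    unsigned int diary_mask_bits = 64;  /* default set in diary_create() */
    unsigned int nelts = 40;            /* assumed number of diary entries */
    unsigned int N = 64;                /* ceil_power_of_2(40) */
    unsigned int log2n = 6;             /* h2_log2(64) */
    unsigned int log2pmax = 8;          /* h2_log2(ceil_power_of_2(256)) */

    unsigned int log2p = H2MIN(diary_mask_bits - log2n, log2pmax);  /* 8 */
    unsigned int mask_bits = log2n + log2p;                         /* 14 */
    unsigned int delta_bits = diary_mask_bits - mask_bits;          /* 50 */

    /* each 64-bit hash is shifted right by delta_bits, leaving a
     * mask_bits-wide value whose deltas are Golomb-coded with log2p
     * fixed bits per entry */
    printf("nelts=%u log2p=%u mask_bits=%u delta_bits=%u\n",
           nelts, log2p, mask_bits, delta_bits);
    return 0;
}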
@@ -875,29 +864,32 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
     encoder.bit = 8;
     encoder.last = 0;
 
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool,
+    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
                   "h2_push_diary_digest_get: %d entries, N=%d, log2n=%d, "
-                  "mask_bits=%d, enc.mask_bits=%d, enc.log2p=%d",
-                  (int)nelts, (int)N, (int)log2n, (int)mask_bits,
-                  (int)encoder.mask_bits, (int)encoder.log2p);
-
-    hash_count = diary->entries->nelts;
-    hashes = apr_pcalloc(encoder.pool, hash_count);
-    for (i = 0; i < hash_count; ++i) {
-        hashes[i] = ((&APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry))->hash
-                     & encoder.mask);
-    }
-
-    qsort(hashes, hash_count, sizeof(apr_uint64_t), cmp_puint64);
-    for (i = 0; i < hash_count; ++i) {
-        if (!i || (hashes[i] != hashes[i-1])) {
-            gset_encode_next(&encoder, hashes[i]);
+                  "mask_bits=%d, enc.mask_bits=%d, delta_bits=%d, enc.log2p=%d, authority=%s",
+                  (int)nelts, (int)N, (int)log2n, diary->mask_bits,
+                  (int)encoder.mask_bits, (int)encoder.delta_bits,
+                  (int)encoder.log2p, authority);
+
+    if (!authority || !diary->authority
+        || !strcmp("*", authority) || !strcmp(diary->authority, authority)) {
+        hash_count = diary->entries->nelts;
+        hashes = apr_pcalloc(encoder.pool, hash_count);
+        for (i = 0; i < hash_count; ++i) {
+            hashes[i] = ((&APR_ARRAY_IDX(diary->entries, i, h2_push_diary_entry))->hash
+                         >> encoder.delta_bits);
         }
+        qsort(hashes, hash_count, sizeof(apr_uint64_t), cmp_puint64);
+        for (i = 0; i < hash_count; ++i) {
+            if (!i || (hashes[i] != hashes[i-1])) {
+                gset_encode_next(&encoder, hashes[i]);
+            }
+        }
+        ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+                      "h2_push_diary_digest_get: golomb compressed hashes, %d bytes",
+                      (int)encoder.offset + 1);
     }
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool,
-                  "h2_push_diary_digest_get: golomb compressed hashes, %d bytes",
-                  (int)encoder.offset + 1);
 
     *pdata = (const char *)encoder.data;
     *plen = encoder.offset + 1;
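The hashes are sorted, deduplicated and stored as a Golomb-Rice coded delta sequence: each delta contributes its high bits (delta >> fixed_bits) in unary plus fixed_bits literal low bits. A standalone sketch of that coding scheme with a toy bit writer (not the module's encoder; the values and fixed_bits are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    unsigned char buf[256];
    size_t bitpos;                     /* next free bit, MSB first */
} bitwriter;

static void put_bit(bitwriter *bw, int bit)
{
    if (bit) bw->buf[bw->bitpos / 8] |= (unsigned char)(0x80 >> (bw->bitpos % 8));
    bw->bitpos++;
}

static void rice_encode(bitwriter *bw, uint64_t delta, unsigned fixed_bits)
{
    uint64_t q = delta >> fixed_bits;
    unsigned i;
    while (q--) put_bit(bw, 1);        /* unary quotient */
    put_bit(bw, 0);                    /* terminator */
    for (i = fixed_bits; i-- > 0; )    /* literal low bits, MSB first */
        put_bit(bw, (int)((delta >> i) & 1));
}

int main(void)
{
    uint64_t vals[] = { 3, 10, 19, 30 };   /* already sorted and unique */
    bitwriter bw;
    uint64_t last = 0;
    size_t i;

    memset(&bw, 0, sizeof(bw));
    for (i = 0; i < sizeof(vals)/sizeof(vals[0]); ++i) {
        rice_encode(&bw, vals[i] - last, 3);   /* fixed_bits = 3, i.e. log2p = 3 */
        last = vals[i];
    }
    printf("%zu bits used\n", bw.bitpos);
    return 0;
}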
@@ -958,7 +950,7 @@ static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
     *phash = delta + decoder->last_val;
     decoder->last_val = *phash;
 
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, decoder->pool,
+    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
                   "h2_push_diary_digest_dec: val=%lx, delta=%lx, flex=%d, fixed=%lx",
                   *phash, delta, (int)flex, fixed);
@@ -974,7 +966,7 @@ static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
  * @param len the length of the cache digest
  * @return APR_EINVAL if digest was not successfully parsed
  */
-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary,
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
                                       const char *data, apr_size_t len)
 {
     gset_decoder decoder;
@@ -983,8 +975,6 @@ apr_status_t h2_push_diary_digest_set(h2_push_diary *diary,
     apr_pool_t *pool = diary->entries->pool;
     h2_push_diary_entry e;
     apr_status_t status = APR_SUCCESS;
-    apr_uint64_t mask;
-    int mask_bits;
 
     if (len < 2) {
         /* at least this should be there */
@@ -992,23 +982,22 @@ apr_status_t h2_push_diary_digest_set(h2_push_diary *diary,
     }
     log2n = data[0];
     log2p = data[1];
-    mask_bits = log2n + log2p;
-    if (mask_bits > 64) {
+    diary->mask_bits = log2n + log2p;
+    if (diary->mask_bits > 64) {
         /* cannot handle */
         return APR_ENOTIMPL;
     }
-    else if (mask_bits == 64) {
-        mask = 0xffffffffffffffffu;
-    }
-    else {
-        mask = 1;
-        mask = (mask << mask_bits) - 1;
-    }
 
     /* whatever is in the digest, it replaces the diary entries */
     apr_array_clear(diary->entries);
+    if (!authority || !strcmp("*", authority)) {
+        diary->authority = NULL;
+    }
+    else if (!diary->authority || strcmp(diary->authority, authority)) {
+        diary->authority = apr_pstrdup(diary->entries->pool, authority);
+    }
 
-    N = log2inv(log2n + log2p);
+    N = h2_log2inv(log2n + log2p);
 
     decoder.diary = diary;
     decoder.pool = pool;
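On the set side, the first two bytes of the binary digest carry log2n and log2p; their sum becomes the diary's mask_bits and is rejected above 64, since the hashes are apr_uint64_t. A tiny sketch of that header handling with illustrative byte values:

#include <stdio.h>

int main(void)
{
    const unsigned char digest[] = { 6, 8 /* , golomb-coded deltas ... */ };
    unsigned int log2n = digest[0];
    unsigned int log2p = digest[1];
    unsigned int mask_bits = log2n + log2p;

    if (mask_bits > 64) {
        fprintf(stderr, "cannot handle digest with %u relevant bits\n", mask_bits);
        return 1;
    }
    printf("log2n=%u log2p=%u mask_bits=%u\n", log2n, log2p, mask_bits);
    return 0;
}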
@@ -1020,14 +1009,12 @@ apr_status_t h2_push_diary_digest_set(h2_push_diary *diary,
     decoder.last_val = 0;
 
     diary->N = N;
-    diary->mask = mask;
     /* Determine effective N we use for storage */
     if (!N) {
         /* a totally empty cache digest. someone tells us that she has no
          * entries in the cache at all. Use our own preferences for N+mask
          */
         diary->N = diary->NMax;
-        diary->mask = 0xffffffffffffffffu;
         return APR_SUCCESS;
     }
     else if (N > diary->NMax) {
@@ -1036,10 +1023,10 @@ apr_status_t h2_push_diary_digest_set(h2_push_diary *diary,
         diary->N = diary->NMax;
     }
 
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool,
+    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
                   "h2_push_diary_digest_set: N=%d, log2n=%d, "
-                  "diary->mask=%lx, dec.log2p=%d",
-                  (int)diary->N, (int)log2n, diary->mask,
+                  "diary->mask_bits=%d, dec.log2p=%d",
+                  (int)diary->N, (int)log2n, diary->mask_bits,
                   (int)decoder.log2p);
 
     for (i = 0; i < diary->N; ++i) {
@@ -1050,20 +1037,20 @@ apr_status_t h2_push_diary_digest_set(h2_push_diary *diary,
         h2_push_diary_append(diary, &e);
     }
 
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool,
-                  "h2_push_diary_digest_set: diary now with %d entries, mask=%lx",
-                  (int)diary->entries->nelts, diary->mask);
+    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+                  "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
+                  (int)diary->entries->nelts, diary->mask_bits);
     return status;
 }
 
-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *data64url,
-                                        apr_pool_t *pool)
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+                                        const char *data64url, apr_pool_t *pool)
 {
     const char *data;
     apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
 
-    ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool,
+    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
                   "h2_push_diary_digest64_set: digest=%s, dlen=%d",
                   data64url, (int)len);
-    return h2_push_diary_digest_set(diary, data, len);
+    return h2_push_diary_digest_set(diary, authority, data, len);
 }

modules/http2/h2_push.h

@@ -45,7 +45,9 @@ struct h2_push_diary {
     apr_array_header_t *entries;
     apr_size_t NMax;          /* Maximum for N, should size change be necessary */
     apr_size_t N;             /* Current maximum number of entries, power of 2 */
-    apr_uint64_t mask;        /* applied on hash value comparision */
+    apr_uint64_t mask;        /* mask for relevant bits */
+    unsigned int mask_bits;   /* number of relevant bits */
+    const char *authority;
     h2_push_digest_type dtype;
     h2_push_digest_calc *dcalc;
 };
@@ -103,26 +105,28 @@ apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
  *
  * @param diary the diary to calculdate the digest from
  * @param p the pool to use
+ * @param authority the authority to get the data for, use NULL/"*" for all
  * @param pdata on successful return, the binary cache digest
  * @param plen on successful return, the length of the binary data
  */
 apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
-                                      apr_uint32_t maxP, const char **pdata,
-                                      apr_size_t *plen);
+                                      apr_uint32_t maxP, const char *authority,
+                                      const char **pdata, apr_size_t *plen);
 
 /**
  * Initialize the push diary by a cache digest as described in
  * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
  * .
  * @param diary the diary to set the digest into
+ * @param authority the authority to set the data for
  * @param data the binary cache digest
  * @param len the length of the cache digest
  * @return APR_EINVAL if digest was not successfully parsed
  */
-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary,
+apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
                                       const char *data, apr_size_t len);
 
-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *data64url,
-                                        apr_pool_t *pool);
+apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+                                        const char *data64url, apr_pool_t *pool);
 
 #endif /* defined(__mod_h2__h2_push__) */
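For callers, the change means every digest get/set is now scoped to an authority, with NULL or "*" selecting all entries. A hypothetical call site against the signatures declared above, with placeholder variable names (not code from the module):

#include "h2_push.h"

static apr_status_t report_digest(h2_push_diary *diary, apr_pool_t *pool,
                                  const char *authority)
{
    const char *data;
    apr_size_t len;
    apr_status_t status;

    /* binary digest limited to entries matching this authority */
    status = h2_push_diary_digest_get(diary, pool, 256, authority, &data, &len);
    if (status == APR_SUCCESS) {
        /* e.g. re-initialize the diary from a digest a client sent */
        status = h2_push_diary_digest_set(diary, authority, data, len);
    }
    return status;
}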

modules/http2/h2_util.c

@@ -174,7 +174,9 @@ const char *h2_util_base64url_encode(const char *data,
                                 ((i+1 < len)? (udata[i+1] >> 4) : 0) & 0x3fu ];
         *p++ = BASE64URL_CHARS[ (udata[i+1] << 2) +
                                 ((i+2 < len)? (udata[i+2] >> 6) : 0) & 0x3fu ];
-        *p++ = (i+2 < len)? BASE64URL_CHARS[ udata[i+2] & 0x3fu ] : '=';
+        if (i+2 < len) {
+            *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ];
+        }
     }
 
     return enc;
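The old line appended a '=' filler even when the third input byte did not exist; base64url as used for the cache digest is unpadded, so trailing output characters are only written for bytes that are actually present. A standalone sketch of unpadded base64url encoding under that rule (RFC 4648 section 5; not the module's implementation):

#include <stdlib.h>
#include <string.h>

static const char B64URL[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";

static char *base64url_encode(const unsigned char *in, size_t len)
{
    char *out = malloc((len + 2) / 3 * 4 + 1);
    char *p = out;
    size_t i;

    if (!out) return NULL;
    for (i = 0; i < len; i += 3) {
        *p++ = B64URL[in[i] >> 2];
        *p++ = B64URL[((in[i] & 0x03) << 4) | ((i+1 < len)? (in[i+1] >> 4) : 0)];
        if (i + 1 < len) {
            *p++ = B64URL[((in[i+1] & 0x0f) << 2) | ((i+2 < len)? (in[i+2] >> 6) : 0)];
        }
        if (i + 2 < len) {
            *p++ = B64URL[in[i+2] & 0x3f];
        }
    }
    *p = '\0';
    return out;               /* caller frees; no '=' padding appended */
}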

modules/http2/h2_version.h

@@ -26,7 +26,7 @@
  * @macro
  * Version number of the http2 module as c string
  */
-#define MOD_HTTP2_VERSION "1.1.1-DEV"
+#define MOD_HTTP2_VERSION "1.2.1"
 
 /**
  * @macro
@@ -34,7 +34,7 @@
  * release. This is a 24 bit number with 8 bits for major number, 8 bits
  * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
  */
-#define MOD_HTTP2_VERSION_NUM 0x010101
+#define MOD_HTTP2_VERSION_NUM 0x010201
 
 #endif /* mod_h2_h2_version_h */