Merge part of np/delta

commit d00e0f8101

1 changed file, diff-delta.c, with 75 additions and 49 deletions (124 lines changed)
--- a/diff-delta.c
+++ b/diff-delta.c
@@ -20,63 +20,93 @@
 
 #include <stdlib.h>
 #include <string.h>
-#include <zlib.h>
 #include "delta.h"
 
-
-/* block size: min = 16, max = 64k, power of 2 */
-#define BLK_SIZE 16
-
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-
-#define GR_PRIME 0x9e370001
-#define HASH(v, shift) (((unsigned int)(v) * GR_PRIME) >> (shift))
 
 struct index {
         const unsigned char *ptr;
-        unsigned int val;
         struct index *next;
 };
 
 static struct index ** delta_index(const unsigned char *buf,
                                    unsigned long bufsize,
+                                   unsigned long trg_bufsize,
                                    unsigned int *hash_shift)
 {
-        unsigned int hsize, hshift, entries, blksize, i;
+        unsigned long hsize;
+        unsigned int i, hshift, hlimit, *hash_count;
         const unsigned char *data;
         struct index *entry, **hash;
         void *mem;
 
         /* determine index hash size */
-        entries = (bufsize + BLK_SIZE - 1) / BLK_SIZE;
-        hsize = entries / 4;
-        for (i = 4; (1 << i) < hsize && i < 16; i++);
+        hsize = bufsize / 4;
+        for (i = 8; (1 << i) < hsize && i < 24; i += 2);
         hsize = 1 << i;
-        hshift = 32 - i;
+        hshift = (i - 8) / 2;
         *hash_shift = hshift;
 
         /* allocate lookup index */
-        mem = malloc(hsize * sizeof(*hash) + entries * sizeof(*entry));
+        mem = malloc(hsize * sizeof(*hash) + bufsize * sizeof(*entry));
         if (!mem)
                 return NULL;
         hash = mem;
         entry = mem + hsize * sizeof(*hash);
         memset(hash, 0, hsize * sizeof(*hash));
 
-        /* then populate it */
-        data = buf + entries * BLK_SIZE - BLK_SIZE;
-        blksize = bufsize - (data - buf);
-        while (data >= buf) {
-                unsigned int val = adler32(0, data, blksize);
-                i = HASH(val, hshift);
-                entry->ptr = data;
-                entry->val = val;
+        /* allocate an array to count hash entries */
+        hash_count = calloc(hsize, sizeof(*hash_count));
+        if (!hash_count) {
+                free(hash);
+                return NULL;
+        }
+
+        /* then populate the index */
+        data = buf + bufsize - 2;
+        while (data > buf) {
+                entry->ptr = --data;
+                i = data[0] ^ ((data[1] ^ (data[2] << hshift)) << hshift);
                 entry->next = hash[i];
                 hash[i] = entry++;
-                blksize = BLK_SIZE;
-                data -= BLK_SIZE;
+                hash_count[i]++;
         }
 
+        /*
+         * Determine a limit on the number of entries in the same hash
+         * bucket.  This guards us against pathological data sets causing
+         * really bad hash distribution with most entries in the same hash
+         * bucket, which would bring us to O(m*n) computing costs (m and n
+         * corresponding to reference and target buffer sizes).
+         *
+         * The larger the target buffer, the more important it is to
+         * have small entry lists for each hash bucket.  With such a limit
+         * the cost is bounded to something more like O(m+n).
+         */
+        hlimit = (1 << 26) / trg_bufsize;
+        if (hlimit < 16)
+                hlimit = 16;
+
+        /*
+         * Now make sure none of the hash buckets has more entries than
+         * we're willing to test.  Otherwise we short-circuit the entry
+         * list uniformly to still preserve a good distribution across
+         * the reference buffer.
+         */
+        for (i = 0; i < hsize; i++) {
+                if (hash_count[i] < hlimit)
+                        continue;
+                entry = hash[i];
+                do {
+                        struct index *keep = entry;
+                        int skip = hash_count[i] / hlimit / 2;
+                        do {
+                                entry = entry->next;
+                        } while(--skip && entry);
+                        keep->next = entry;
+                } while(entry);
+        }
+        free(hash_count);
+
         return hash;
 }
 
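The new delta_index() drops the adler32-over-16-byte-blocks scheme: it creates an index entry at every byte offset of the reference buffer, hashes a 3-byte window straight into a table sized from the buffer, and then trims overlong hash buckets so that pathological input cannot drive the matching phase toward O(m*n). The sizing arithmetic is easy to check in isolation; the following standalone sketch is illustrative only and not part of the commit (hash3() and the sample sizes are invented for this note):

#include <stdio.h>

/* The same 3-byte hash expression the patch uses, parameterized by hshift. */
static unsigned int hash3(const unsigned char *data, unsigned int hshift)
{
        return data[0] ^ ((data[1] ^ (data[2] << hshift)) << hshift);
}

int main(void)
{
        /* arbitrary sample buffer sizes: 4k, 1M, 16M */
        unsigned long sizes[] = { 4096, 1UL << 20, 16UL << 20 };
        unsigned char window[3] = { 'a', 'b', 'c' };
        int k;

        for (k = 0; k < 3; k++) {
                unsigned long ref_size = sizes[k];  /* plays the role of bufsize */
                unsigned long trg_size = sizes[k];  /* plays the role of trg_bufsize */
                unsigned long hsize = ref_size / 4;
                unsigned int i, hshift, hlimit;

                /* hash table size: a power of two between 2^8 and 2^24 */
                for (i = 8; (1UL << i) < hsize && i < 24; i += 2);
                hsize = 1UL << i;

                /* with hshift = (i - 8) / 2 the hash spans at most
                 * 8 + 2*hshift = i bits, so it always fits in the table */
                hshift = (i - 8) / 2;

                /* bucket length cap: shrinks as the target grows, floor of 16 */
                hlimit = (1 << 26) / trg_size;
                if (hlimit < 16)
                        hlimit = 16;

                printf("size=%lu -> hsize=%lu hshift=%u hlimit=%u hash3(\"abc\")=%u\n",
                       ref_size, hsize, hshift, hlimit, hash3(window, hshift));
        }
        return 0;
}

For a 1 MiB target buffer, for instance, the cap works out to 64 entries per bucket, and any target larger than 4 MiB is clamped to the floor of 16.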
@@ -100,7 +130,7 @@ void *diff_delta(void *from_buf, unsigned long from_size,
 
         if (!from_size || !to_size)
                 return NULL;
-        hash = delta_index(from_buf, from_size, &hash_shift);
+        hash = delta_index(from_buf, from_size, to_size, &hash_shift);
         if (!hash)
                 return NULL;
 
@@ -141,29 +171,25 @@ void *diff_delta(void *from_buf, unsigned long from_size,
 
         while (data < top) {
                 unsigned int moff = 0, msize = 0;
-                unsigned int blksize = MIN(top - data, BLK_SIZE);
-                unsigned int val = adler32(0, data, blksize);
-                i = HASH(val, hash_shift);
-                for (entry = hash[i]; entry; entry = entry->next) {
-                        const unsigned char *ref = entry->ptr;
-                        const unsigned char *src = data;
-                        unsigned int ref_size = ref_top - ref;
-                        if (entry->val != val)
-                                continue;
-                        if (ref_size > top - src)
-                                ref_size = top - src;
-                        while (ref_size && *src++ == *ref) {
-                                ref++;
-                                ref_size--;
-                        }
-                        ref_size = ref - entry->ptr;
-                        if (ref_size > msize) {
-                                /* this is our best match so far */
-                                moff = entry->ptr - ref_data;
-                                msize = ref_size;
-                                if (msize >= 0x10000) {
-                                        msize = 0x10000;
+                if (data + 3 <= top) {
+                        i = data[0] ^ ((data[1] ^ (data[2] << hash_shift)) << hash_shift);
+                        for (entry = hash[i]; entry; entry = entry->next) {
+                                const unsigned char *ref = entry->ptr;
+                                const unsigned char *src = data;
+                                unsigned int ref_size = ref_top - ref;
+                                if (ref_size > top - src)
+                                        ref_size = top - src;
+                                if (ref_size > 0x10000)
+                                        ref_size = 0x10000;
+                                if (ref_size <= msize)
                                         break;
+                                if (*ref != *src)
+                                        continue;
+                                while (ref_size-- && *++src == *++ref);
+                                if (msize < ref - entry->ptr) {
+                                        /* this is our best match so far */
+                                        msize = ref - entry->ptr;
+                                        moff = entry->ptr - ref_data;
                                 }
                         }
                 }
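In the rewritten matching loop the stored adler32 value is gone: candidates from a hash bucket are compared byte for byte, a single match is capped at 64k (0x10000), and candidates that cannot improve on the best match found so far are rejected before any byte comparison. A minimal standalone sketch of that inner loop follows; it is illustrative only and not part of the commit: best_match(), struct candidate and the sample strings are invented here, and a flat candidate array stands in for the real hash-bucket list.

#include <stdio.h>

/* A candidate starting position inside the reference buffer; in the real
 * code these come from the hash buckets built by delta_index(). */
struct candidate {
        const unsigned char *ptr;
};

/* Find the longest match for 'data' among the candidates, the way the
 * rewritten loop does: clamp the comparable span, skip candidates that
 * cannot beat the current best, then extend the match byte by byte. */
static unsigned int best_match(const unsigned char *ref_data,
                               const unsigned char *ref_top,
                               const unsigned char *data,
                               const unsigned char *top,
                               const struct candidate *cand, int ncand,
                               unsigned int *moff)
{
        unsigned int msize = 0;
        int k;

        *moff = 0;
        for (k = 0; k < ncand; k++) {
                const unsigned char *ref = cand[k].ptr;
                const unsigned char *src = data;
                unsigned int ref_size = ref_top - ref;

                if (ref_size > (unsigned int)(top - src))
                        ref_size = top - src;
                if (ref_size > 0x10000)         /* a delta copy spans at most 64k */
                        ref_size = 0x10000;
                if (ref_size <= msize)          /* cannot beat the best so far; the
                                                 * patch breaks out of the bucket here,
                                                 * but a flat array must keep scanning */
                        continue;
                if (*ref != *src)               /* cheap first-byte rejection */
                        continue;
                while (ref_size-- && *++src == *++ref)
                        ;                       /* extend the match byte by byte */
                if (msize < (unsigned int)(ref - cand[k].ptr)) {
                        msize = ref - cand[k].ptr;
                        *moff = cand[k].ptr - ref_data;
                }
        }
        return msize;
}

int main(void)
{
        static const unsigned char ref[] = "the quick brown fox jumps";
        static const unsigned char trg[] = "a quick brown dog";
        struct candidate cand[] = { { ref + 4 }, { ref + 10 } };
        unsigned int moff, msize;

        msize = best_match(ref, ref + sizeof(ref) - 1,
                           trg + 2, trg + sizeof(trg) - 1,
                           cand, 2, &moff);
        printf("best match: %u bytes at reference offset %u\n", msize, moff);
        return 0;
}

Run on its own, the sketch reports a 12-byte match ("quick brown ") at offset 4 of the reference string.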