mirror of git://erdgeist.org/opentracker (synced 2025-02-22 17:11:29 +08:00)

commit 7c633c259e (parent 4c5935c057): clang-format

Changed files:

opentracker.c: 835 changed lines (diff suppressed because it is too large)
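The commit message says only "clang-format". The repository's actual style file is not shown on this page; the sketch below is a hypothetical .clang-format reconstructed from the changes visible in the hunks (right-bound pointers, two-space indent, a long column limit, sorted includes, unindented case labels, short functions collapsed onto one line). Treat every option value as an assumption, not as the project's real configuration.

# Hypothetical .clang-format, inferred from the diff below
BasedOnStyle: LLVM
IndentWidth: 2
ColumnLimit: 160
PointerAlignment: Right
SortIncludes: true
IndentCaseLabels: false
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: true
AllowShortFunctionsOnASingleLine: All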
ot_accesslist.c (314 changed lines)

@@ -5,35 +5,35 @@

 /* System */
 #include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <stdio.h>
-#include <signal.h>
 #include <unistd.h>
 #ifdef WANT_DYNAMIC_ACCESSLIST
-#include <sys/types.h>
-#include <sys/stat.h>
 #include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 #endif

 /* Libowfat */
 #include "byte.h"
-#include "scan.h"
+#include "fmt.h"
 #include "ip6.h"
 #include "mmap.h"
-#include "fmt.h"
+#include "scan.h"

 /* Opentracker */
-#include "trackerlogic.h"
 #include "ot_accesslist.h"
 #include "ot_vector.h"
+#include "trackerlogic.h"

 /* GLOBAL VARIABLES */
 #ifdef WANT_ACCESSLIST
 char *g_accesslist_filename = NULL;
 #ifdef WANT_DYNAMIC_ACCESSLIST
 char *g_accesslist_pipe_add = NULL;
 char *g_accesslist_pipe_delete = NULL;
 #endif
 static pthread_mutex_t g_accesslist_mutex;

@@ -55,20 +55,18 @@ struct ot_accesslist {
   ot_time base;
   ot_accesslist *next;
 };
-static ot_accesslist * _Atomic g_accesslist = NULL;
+static ot_accesslist *_Atomic g_accesslist = NULL;
 #ifdef WANT_DYNAMIC_ACCESSLIST
-static ot_accesslist * _Atomic g_accesslist_add = NULL;
-static ot_accesslist * _Atomic g_accesslist_delete = NULL;
+static ot_accesslist *_Atomic g_accesslist_add = NULL;
+static ot_accesslist *_Atomic g_accesslist_delete = NULL;
 #endif

 /* Helpers to work on access lists */
-static int vector_compare_hash(const void *hash1, const void *hash2 ) {
-  return memcmp( hash1, hash2, OT_HASH_COMPARE_SIZE );
-}
+static int vector_compare_hash(const void *hash1, const void *hash2) { return memcmp(hash1, hash2, OT_HASH_COMPARE_SIZE); }

-static ot_accesslist * accesslist_free(ot_accesslist *accesslist) {
+static ot_accesslist *accesslist_free(ot_accesslist *accesslist) {
   while (accesslist) {
-    ot_accesslist * this_accesslist = accesslist;
+    ot_accesslist *this_accesslist = accesslist;
     accesslist = this_accesslist->next;
     free(this_accesslist->list);
     free(this_accesslist);

@@ -76,8 +74,8 @@ static ot_accesslist * accesslist_free(ot_accesslist *accesslist) {
   return NULL;
 }

-static ot_accesslist * accesslist_make(ot_accesslist *next, size_t size) {
-  ot_accesslist * accesslist_new = malloc(sizeof(ot_accesslist));
+static ot_accesslist *accesslist_make(ot_accesslist *next, size_t size) {
+  ot_accesslist *accesslist_new = malloc(sizeof(ot_accesslist));
   if (accesslist_new) {
     accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL;
     accesslist_new->size = size;

@@ -102,76 +100,77 @@ static void accesslist_clean(ot_accesslist *accesslist) {
 }

 /* Read initial access list */
-static void accesslist_readfile( void ) {
-  ot_accesslist * accesslist_new;
+static void accesslist_readfile(void) {
+  ot_accesslist *accesslist_new;
   ot_hash *info_hash;
   const char *map, *map_end, *read_offs;
   size_t maplen;

-  if( ( map = mmap_read( g_accesslist_filename, &maplen ) ) == NULL ) {
-    char *wd = getcwd( NULL, 0 );
-    fprintf( stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd );
-    free( wd );
+  if ((map = mmap_read(g_accesslist_filename, &maplen)) == NULL) {
+    char *wd = getcwd(NULL, 0);
+    fprintf(stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd);
+    free(wd);
     return;
   }

   /* You need at least 41 bytes to pass an info_hash, make enough room
      for the maximum amount of them */
   accesslist_new = accesslist_make(g_accesslist, maplen / 41);
-  if( !accesslist_new ) {
-    fprintf( stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", ( maplen / 41 ) * 20 );
-    mmap_unmap( map, maplen);
+  if (!accesslist_new) {
+    fprintf(stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", (maplen / 41) * 20);
+    mmap_unmap(map, maplen);
     return;
   }
   info_hash = accesslist_new->list;

   /* No use to scan if there's not enough room for another full info_hash */
   map_end = map + maplen - 40;
   read_offs = map;

   /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */
-  while( read_offs <= map_end ) {
+  while (read_offs <= map_end) {
     int i;
-    for( i=0; i<(int)sizeof(ot_hash); ++i ) {
-      int eger1 = scan_fromhex( (unsigned char)read_offs[ 2*i ] );
-      int eger2 = scan_fromhex( (unsigned char)read_offs[ 1 + 2*i ] );
-      if( eger1 < 0 || eger2 < 0 )
+    for (i = 0; i < (int)sizeof(ot_hash); ++i) {
+      int eger1 = scan_fromhex((unsigned char)read_offs[2 * i]);
+      int eger2 = scan_fromhex((unsigned char)read_offs[1 + 2 * i]);
+      if (eger1 < 0 || eger2 < 0)
         break;
       (*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
     }

-    if( i == sizeof(ot_hash) ) {
+    if (i == sizeof(ot_hash)) {
       read_offs += 40;

       /* Append accesslist to accesslist vector */
-      if( read_offs == map_end || scan_fromhex( (unsigned char)*read_offs ) < 0 )
+      if (read_offs == map_end || scan_fromhex((unsigned char)*read_offs) < 0)
         ++info_hash;
     }

     /* Find start of next line */
-    while( read_offs <= map_end && *(read_offs++) != '\n' );
+    while (read_offs <= map_end && *(read_offs++) != '\n')
+      ;
   }
 #ifdef _DEBUG
-  fprintf( stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list) );
+  fprintf(stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list));
 #endif

-  mmap_unmap( map, maplen);
+  mmap_unmap(map, maplen);

-  qsort( accesslist_new->list, info_hash - accesslist_new->list, sizeof( *info_hash ), vector_compare_hash );
+  qsort(accesslist_new->list, info_hash - accesslist_new->list, sizeof(*info_hash), vector_compare_hash);
   accesslist_new->size = info_hash - accesslist_new->list;

   /* Now exchange the accesslist vector in the least race condition prone way */
   pthread_mutex_lock(&g_accesslist_mutex);
   accesslist_new->next = g_accesslist;
   g_accesslist = accesslist_new; /* Only now set a new list */

 #ifdef WANT_DYNAMIC_ACCESSLIST
   /* If we have dynamic accesslists, reloading a new one will always void the add/delete lists.
      Insert empty ones at the list head */
   if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL)
     g_accesslist_add = accesslist_new;
   if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL)
     g_accesslist_delete = accesslist_new;
 #endif

   accesslist_clean(g_accesslist);

@@ -179,26 +178,26 @@ static void accesslist_readfile( void ) {
   pthread_mutex_unlock(&g_accesslist_mutex);
 }

-int accesslist_hashisvalid( ot_hash hash ) {
+int accesslist_hashisvalid(ot_hash hash) {
   /* Get working copy of current access list */
-  ot_accesslist * accesslist = g_accesslist;
+  ot_accesslist *accesslist = g_accesslist;
 #ifdef WANT_DYNAMIC_ACCESSLIST
-  ot_accesslist * accesslist_add, * accesslist_delete;
+  ot_accesslist *accesslist_add, *accesslist_delete;
 #endif
-  void * exactmatch = NULL;
+  void *exactmatch = NULL;

   if (accesslist)
-    exactmatch = bsearch( hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash );
+    exactmatch = bsearch(hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);

 #ifdef WANT_DYNAMIC_ACCESSLIST
   /* If we had no match on the main list, scan the list of dynamically added hashes */
   accesslist_add = g_accesslist_add;
   if ((exactmatch == NULL) && accesslist_add)
-    exactmatch = bsearch( hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash );
+    exactmatch = bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);

   /* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */
   accesslist_delete = g_accesslist_delete;
-  if ((exactmatch != NULL) && accesslist_delete && bsearch( hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ))
+  if ((exactmatch != NULL) && accesslist_delete && bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash))
     exactmatch = NULL;
 #endif

@@ -209,31 +208,32 @@ int accesslist_hashisvalid( ot_hash hash ) {
 #endif
 }

-static void * accesslist_worker( void * args ) {
+static void *accesslist_worker(void *args) {
   int sig;
   sigset_t signal_mask;

   sigemptyset(&signal_mask);
   sigaddset(&signal_mask, SIGHUP);

   (void)args;

-  while( 1 ) {
+  while (1) {
     if (!g_opentracker_running)
       return NULL;

     /* Initial attempt to read accesslist */
-    accesslist_readfile( );
+    accesslist_readfile();

     /* Wait for signals */
-    while( sigwait (&signal_mask, &sig) != 0 && sig != SIGHUP );
+    while (sigwait(&signal_mask, &sig) != 0 && sig != SIGHUP)
+      ;
   }
   return NULL;
 }

 #ifdef WANT_DYNAMIC_ACCESSLIST
 static pthread_t thread_adder_id, thread_deleter_id;
-static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic * adding_to, ot_accesslist * _Atomic * removing_from) {
+static void *accesslist_adddel_worker(char *fifoname, ot_accesslist *_Atomic *adding_to, ot_accesslist *_Atomic *removing_from) {
   struct stat st;

   if (!stat(fifoname, &st)) {

@@ -250,9 +250,9 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
   }

   while (g_opentracker_running) {
-    FILE * fifo = fopen(fifoname, "r");
+    FILE *fifo = fopen(fifoname, "r");
     char *line = NULL;
     size_t linecap = 0;
     ssize_t linelen;

     if (!fifo) {

@@ -262,7 +262,7 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic

     while ((linelen = getline(&line, &linecap, fifo)) > 0) {
       ot_hash info_hash;
       int i;

       printf("Got line %*s", (int)linelen, line);
       /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*"

@@ -270,15 +270,15 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
       if (linelen < 41)
         continue;

-      for( i=0; i<(int)sizeof(ot_hash); ++i ) {
-        int eger1 = scan_fromhex( (unsigned char)line[ 2*i ] );
-        int eger2 = scan_fromhex( (unsigned char)line[ 1 + 2*i ] );
-        if( eger1 < 0 || eger2 < 0 )
+      for (i = 0; i < (int)sizeof(ot_hash); ++i) {
+        int eger1 = scan_fromhex((unsigned char)line[2 * i]);
+        int eger2 = scan_fromhex((unsigned char)line[1 + 2 * i]);
+        if (eger1 < 0 || eger2 < 0)
           break;
-        ((uint8_t*)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
+        ((uint8_t *)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
       }
-printf("parsed info_hash %20s\n", info_hash);
-      if( i != sizeof(ot_hash) )
+      printf("parsed info_hash %20s\n", info_hash);
+      if (i != sizeof(ot_hash))
         continue;

       /* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the

@@ -287,10 +287,10 @@ printf("parsed info_hash %20s\n", info_hash);

       /* If the info hash is in the removing_from list, create a new head without that entry */
       if (*removing_from && (*removing_from)->list) {
-        ot_hash * exactmatch = bsearch( info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash );
+        ot_hash *exactmatch = bsearch(info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
         if (exactmatch) {
           ptrdiff_t off = exactmatch - (*removing_from)->list;
-          ot_accesslist * accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1);
+          ot_accesslist *accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1);
           if (accesslist_new) {
             memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off);
             memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1);

@@ -301,19 +301,19 @@ printf("parsed info_hash %20s\n", info_hash);

       /* Simple case: there's no adding_to list yet, create one with one member */
       if (!*adding_to) {
-        ot_accesslist * accesslist_new = accesslist_make(NULL, 1);
+        ot_accesslist *accesslist_new = accesslist_make(NULL, 1);
         if (accesslist_new) {
           memcpy(accesslist_new->list, info_hash, sizeof(ot_hash));
           *adding_to = accesslist_new;
         }
       } else {
         int exactmatch = 0;
-        ot_hash * insert_point = binary_search( info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch );
+        ot_hash *insert_point = binary_search(info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch);

         /* Only if the info hash is not in the adding_to list, create a new head with that entry */
         if (!exactmatch) {
-          ot_accesslist * accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1);
+          ot_accesslist *accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1);
           ptrdiff_t off = insert_point - (*adding_to)->list;
           if (accesslist_new) {
             memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off);
             memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash));

@@ -331,29 +331,29 @@ printf("parsed info_hash %20s\n", info_hash);
   return NULL;
 }

-static void * accesslist_adder_worker( void * args ) {
+static void *accesslist_adder_worker(void *args) {
   (void)args;
   return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete);
 }
-static void * accesslist_deleter_worker( void * args ) {
+static void *accesslist_deleter_worker(void *args) {
   (void)args;
   return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add);
 }
 #endif

 static pthread_t thread_id;
-void accesslist_init( ) {
+void accesslist_init() {
   pthread_mutex_init(&g_accesslist_mutex, NULL);
-  pthread_create( &thread_id, NULL, accesslist_worker, NULL );
+  pthread_create(&thread_id, NULL, accesslist_worker, NULL);
 #ifdef WANT_DYNAMIC_ACCESSLIST
   if (g_accesslist_pipe_add)
-    pthread_create( &thread_adder_id, NULL, accesslist_adder_worker, NULL );
+    pthread_create(&thread_adder_id, NULL, accesslist_adder_worker, NULL);
   if (g_accesslist_pipe_delete)
-    pthread_create( &thread_deleter_id, NULL, accesslist_deleter_worker, NULL );
+    pthread_create(&thread_deleter_id, NULL, accesslist_deleter_worker, NULL);
 #endif
 }

-void accesslist_deinit( void ) {
+void accesslist_deinit(void) {
   /* Wake up sleeping worker */
   pthread_kill(thread_id, SIGHUP);

@@ -362,16 +362,16 @@ void accesslist_deinit( void ) {
   g_accesslist = accesslist_free(g_accesslist);

 #ifdef WANT_DYNAMIC_ACCESSLIST
   g_accesslist_add = accesslist_free(g_accesslist_add);
   g_accesslist_delete = accesslist_free(g_accesslist_delete);
 #endif

   pthread_mutex_unlock(&g_accesslist_mutex);
-  pthread_cancel( thread_id );
+  pthread_cancel(thread_id);
   pthread_mutex_destroy(&g_accesslist_mutex);
 }

-void accesslist_cleanup( void ) {
+void accesslist_cleanup(void) {
   pthread_mutex_lock(&g_accesslist_mutex);

   accesslist_clean(g_accesslist);

@@ -384,35 +384,34 @@ void accesslist_cleanup( void ) {
 }
 #endif

-int address_in_net( const ot_ip6 address, const ot_net *net ) {
-  int bits = net->bits, checkbits = ( 0x7f00 >> ( bits & 7 ));
-  int result = memcmp( address, &net->address, bits >> 3 );
-  if( !result && ( bits & 7 ) )
-    result = ( checkbits & address[bits>>3] ) - ( checkbits & net->address[bits>>3]);
+int address_in_net(const ot_ip6 address, const ot_net *net) {
+  int bits = net->bits, checkbits = (0x7f00 >> (bits & 7));
+  int result = memcmp(address, &net->address, bits >> 3);
+  if (!result && (bits & 7))
+    result = (checkbits & address[bits >> 3]) - (checkbits & net->address[bits >> 3]);
   return result == 0;
 }

-void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ) {
+void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size) {
   size_t i;
   int exactmatch;

   /* Caller must have a concept of ot_net in it's member */
-  if( member_size < sizeof(ot_net) )
+  if (member_size < sizeof(ot_net))
     return 0;

   /* Check each net in vector for overlap */
-  uint8_t *member = ((uint8_t*)vector->data);
-  for( i=0; i<vector->size; ++i ) {
-    if( address_in_net( *(ot_ip6*)member, net ) ||
-        address_in_net( net->address, (ot_net*)member ) )
+  uint8_t *member = ((uint8_t *)vector->data);
+  for (i = 0; i < vector->size; ++i) {
+    if (address_in_net(*(ot_ip6 *)member, net) || address_in_net(net->address, (ot_net *)member))
       return 0;
     member += member_size;
   }

-  member = vector_find_or_insert( vector, (void*)net, member_size, sizeof(ot_net), &exactmatch );
-  if( member ) {
-    memcpy( member, net, sizeof(ot_net));
-    memcpy( member + sizeof(ot_net), value, member_size - sizeof(ot_net));
+  member = vector_find_or_insert(vector, (void *)net, member_size, sizeof(ot_net), &exactmatch);
+  if (member) {
+    memcpy(member, net, sizeof(ot_net));
+    memcpy(member + sizeof(ot_net), value, member_size - sizeof(ot_net));
   }

   return member;

@@ -420,43 +419,43 @@ void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value

 /* Takes a vector filled with { ot_net net, uint8_t[x] value };
    Returns value associated with the net, or NULL if not found */
-void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ) {
+void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size) {
   int exactmatch;
   /* This binary search will return a pointer to the first non-containing network... */
-  ot_net *net = binary_search( address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch );
-  if( !net )
+  ot_net *net = binary_search(address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch);
+  if (!net)
     return NULL;
   /* ... so we'll need to move back one step unless we've exactly hit the first address in network */
-  if( !exactmatch && ( (void*)net > vector->data ) )
+  if (!exactmatch && ((void *)net > vector->data))
     --net;
-  if( !address_in_net( address, net ) )
+  if (!address_in_net(address, net))
     return NULL;
-  return (void*)net;
+  return (void *)net;
 }

 #ifdef WANT_FULLLOG_NETWORKS
-static ot_vector g_lognets_list;
-ot_log *g_logchain_first, *g_logchain_last;
-
+static ot_vector g_lognets_list;
+ot_log *g_logchain_first, *g_logchain_last;
 static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-void loglist_add_network( const ot_net *net ) {
+
+void loglist_add_network(const ot_net *net) {
   pthread_mutex_lock(&g_lognets_list_mutex);
-  set_value_for_net( net, &g_lognets_list, NULL, sizeof(ot_net));
+  set_value_for_net(net, &g_lognets_list, NULL, sizeof(ot_net));
   pthread_mutex_unlock(&g_lognets_list_mutex);
 }

-void loglist_reset( ) {
+void loglist_reset() {
   pthread_mutex_lock(&g_lognets_list_mutex);
-  free( g_lognets_list.data );
+  free(g_lognets_list.data);
   g_lognets_list.data = 0;
   g_lognets_list.size = g_lognets_list.space = 0;
   pthread_mutex_unlock(&g_lognets_list_mutex);
 }

-int loglist_check_address( const ot_ip6 address ) {
+int loglist_check_address(const ot_ip6 address) {
   int result;
   pthread_mutex_lock(&g_lognets_list_mutex);
-  result = ( NULL != get_value_for_net( address, &g_lognets_list, sizeof(ot_net)) );
+  result = (NULL != get_value_for_net(address, &g_lognets_list, sizeof(ot_net)));
   pthread_mutex_unlock(&g_lognets_list_mutex);
   return result;
 }

@@ -464,44 +463,44 @@ int loglist_check_address( const ot_ip6 address ) {

 #ifdef WANT_IP_FROM_PROXY
 typedef struct {
   ot_net *proxy;
   ot_vector networks;
 } ot_proxymap;

 static ot_vector g_proxies_list;
 static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER;

-int proxylist_add_network( const ot_net *proxy, const ot_net *net ) {
+int proxylist_add_network(const ot_net *proxy, const ot_net *net) {
   ot_proxymap *map;
   int exactmatch, result = 1;
   pthread_mutex_lock(&g_proxies_list_mutex);

   /* If we have a direct hit, use and extend the vector there */
-  map = binary_search( proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch );
+  map = binary_search(proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch);

-  if( !map || !exactmatch ) {
+  if (!map || !exactmatch) {
     /* else see, if we've got overlapping networks
        and get a new empty vector if not */
     ot_vector empty;
-    memset( &empty, 0, sizeof( ot_vector ) );
-    map = set_value_for_net( proxy, &g_proxies_list, &empty, sizeof(ot_proxymap));
+    memset(&empty, 0, sizeof(ot_vector));
+    map = set_value_for_net(proxy, &g_proxies_list, &empty, sizeof(ot_proxymap));
   }

-  if( map && set_value_for_net( net, &map->networks, NULL, sizeof(ot_net) ) )
+  if (map && set_value_for_net(net, &map->networks, NULL, sizeof(ot_net)))
     result = 1;

   pthread_mutex_unlock(&g_proxies_list_mutex);
   return result;
 }

-int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) {
+int proxylist_check_proxy(const ot_ip6 proxy, const ot_ip6 address) {
   int result = 0;
   ot_proxymap *map;

   pthread_mutex_lock(&g_proxies_list_mutex);

-  if( ( map = get_value_for_net( proxy, &g_proxies_list, sizeof(ot_proxymap) ) ) )
-    if( !address || get_value_for_net( address, &map->networks, sizeof(ot_net) ) )
+  if ((map = get_value_for_net(proxy, &g_proxies_list, sizeof(ot_proxymap))))
+    if (!address || get_value_for_net(address, &map->networks, sizeof(ot_net)))
       result = 1;

   pthread_mutex_unlock(&g_proxies_list_mutex);

@@ -514,44 +513,49 @@ static ot_net g_admin_nets[OT_ADMINIP_MAX];
 static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX];
 static unsigned int g_admin_nets_count = 0;

-int accesslist_bless_net( ot_net *net, ot_permissions permissions ) {
-  if( g_admin_nets_count >= OT_ADMINIP_MAX )
+int accesslist_bless_net(ot_net *net, ot_permissions permissions) {
+  if (g_admin_nets_count >= OT_ADMINIP_MAX)
     return -1;

   memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net));
-  g_admin_nets_permissions[ g_admin_nets_count++ ] = permissions;
+  g_admin_nets_permissions[g_admin_nets_count++] = permissions;

 #ifdef _DEBUG
   {
     char _debug[512];
-    int off = snprintf( _debug, sizeof(_debug), "Blessing ip net " );
-    off += fmt_ip6c(_debug+off, net->address );
-    if( net->bits < 128) {
+    int off = snprintf(_debug, sizeof(_debug), "Blessing ip net ");
+    off += fmt_ip6c(_debug + off, net->address);
+    if (net->bits < 128) {
       _debug[off++] = '/';
-      if( ip6_isv4mapped(net->address) )
-        off += fmt_long(_debug+off, net->bits-96);
+      if (ip6_isv4mapped(net->address))
+        off += fmt_long(_debug + off, net->bits - 96);
       else
-        off += fmt_long(_debug+off, net->bits);
+        off += fmt_long(_debug + off, net->bits);
     }

-    if( permissions & OT_PERMISSION_MAY_STAT ) off += snprintf( _debug+off, 512-off, " may_fetch_stats" );
-    if( permissions & OT_PERMISSION_MAY_LIVESYNC ) off += snprintf( _debug+off, 512-off, " may_sync_live" );
-    if( permissions & OT_PERMISSION_MAY_FULLSCRAPE ) off += snprintf( _debug+off, 512-off, " may_fetch_fullscrapes" );
-    if( permissions & OT_PERMISSION_MAY_PROXY ) off += snprintf( _debug+off, 512-off, " may_proxy" );
-    if( !permissions ) off += snprintf( _debug+off, sizeof(_debug)-off, " nothing" );
+    if (permissions & OT_PERMISSION_MAY_STAT)
+      off += snprintf(_debug + off, 512 - off, " may_fetch_stats");
+    if (permissions & OT_PERMISSION_MAY_LIVESYNC)
+      off += snprintf(_debug + off, 512 - off, " may_sync_live");
+    if (permissions & OT_PERMISSION_MAY_FULLSCRAPE)
+      off += snprintf(_debug + off, 512 - off, " may_fetch_fullscrapes");
+    if (permissions & OT_PERMISSION_MAY_PROXY)
+      off += snprintf(_debug + off, 512 - off, " may_proxy");
+    if (!permissions)
+      off += snprintf(_debug + off, sizeof(_debug) - off, " nothing");
     _debug[off++] = '.';
     _debug[off++] = '\n';
-    (void)write( 2, _debug, off );
+    (void)write(2, _debug, off);
   }
 #endif

   return 0;
 }

-int accesslist_is_blessed( ot_ip6 ip, ot_permissions permissions ) {
+int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions) {
   unsigned int i;
-  for( i=0; i<g_admin_nets_count; ++i )
-    if( address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[ i ] & permissions ))
+  for (i = 0; i < g_admin_nets_count; ++i)
+    if (address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[i] & permissions))
       return 1;
   return 0;
 }
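Both accesslist_readfile() and accesslist_adddel_worker() above parse lines of the form "^[:xdigit:]{40}" into a 20-byte info_hash, two hex digits per byte. A minimal standalone sketch of that decode; from_hex() and parse_info_hash() are hypothetical stand-ins for libowfat's scan_fromhex() and the inlined loops in the diff:

#include <stdint.h>

/* Hypothetical stand-in for libowfat's scan_fromhex(): value of a hex digit, or -1 */
static int from_hex(unsigned char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  if (c >= 'A' && c <= 'F') return c - 'A' + 10;
  return -1;
}

/* Decode one 40-hex-digit line into a 20-byte info_hash; returns 0 on success */
static int parse_info_hash(const char *line, uint8_t hash[20]) {
  int i;
  for (i = 0; i < 20; ++i) {
    int hi = from_hex((unsigned char)line[2 * i]);
    int lo = from_hex((unsigned char)line[2 * i + 1]);
    if (hi < 0 || lo < 0)
      return -1; /* not a full 40-digit hex run, ignore the line */
    hash[i] = (uint8_t)(hi * 16 + lo);
  }
  return 0;
}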
ot_accesslist.h

@@ -6,16 +6,18 @@
 #ifndef OT_ACCESSLIST_H__
 #define OT_ACCESSLIST_H__

-#if defined ( WANT_ACCESSLIST_BLACK ) && defined ( WANT_ACCESSLIST_WHITE )
-# error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive.
+#include "trackerlogic.h"
+
+#if defined(WANT_ACCESSLIST_BLACK) && defined(WANT_ACCESSLIST_WHITE)
+#error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive.
 #endif

-#if defined ( WANT_ACCESSLIST_BLACK ) || defined (WANT_ACCESSLIST_WHITE )
+#if defined(WANT_ACCESSLIST_BLACK) || defined(WANT_ACCESSLIST_WHITE)
 #define WANT_ACCESSLIST
-void accesslist_init( void );
-void accesslist_deinit( void );
-int accesslist_hashisvalid( ot_hash hash );
-void accesslist_cleanup( void );
+void accesslist_init(void);
+void accesslist_deinit(void);
+int accesslist_hashisvalid(ot_hash hash);
+void accesslist_cleanup(void);

 extern char *g_accesslist_filename;
 #ifdef WANT_DYNAMIC_ACCESSLIST

@@ -25,16 +27,16 @@ extern char *g_accesslist_pipe_delete;

 #else
 #ifdef WANT_DYNAMIC_ACCESSLIST
-# error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE
+#error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE
 #endif

-#define accesslist_init( accesslist_filename )
-#define accesslist_deinit( )
-#define accesslist_hashisvalid( hash ) 1
+#define accesslist_init(accesslist_filename)
+#define accesslist_deinit()
+#define accesslist_hashisvalid(hash) 1
 #endif

 /* Test if an address is subset of an ot_net, return value is considered a bool */
-int address_in_net( const ot_ip6 address, const ot_net *net );
+int address_in_net(const ot_ip6 address, const ot_net *net);

 /* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member;
    returns NULL

@@ -45,18 +47,17 @@ int address_in_net( const ot_ip6 address, const ot_net *net );
    returns pointer to new member in vector for success
    member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
 */
-void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size );
+void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size);

 /* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member;
    Returns pointer to _member_ associated with the net, or NULL if not found
    member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
 */
-void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size );
-
+void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size);

 #ifdef WANT_IP_FROM_PROXY
-int proxylist_add_network( const ot_net *proxy, const ot_net *net );
-int proxylist_check_network( const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */ );
+int proxylist_add_network(const ot_net *proxy, const ot_net *net);
+int proxylist_check_network(const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */);
 #endif

 #ifdef WANT_FULLLOG_NETWORKS

@@ -70,10 +71,10 @@ struct ot_log {
 };
 extern ot_log *g_logchain_first, *g_logchain_last;

-void loglist_add_network( const ot_net *net );
-void loglist_reset( );
-int loglist_check_address( const ot_ip6 address );
+void loglist_add_network(const ot_net *net);
+void loglist_reset();
+int loglist_check_address(const ot_ip6 address);
 #endif

 typedef enum {
   OT_PERMISSION_MAY_FULLSCRAPE = 0x1,

@@ -82,7 +83,7 @@ typedef enum {
   OT_PERMISSION_MAY_PROXY = 0x8
 } ot_permissions;

-int accesslist_bless_net( ot_net *net, ot_permissions permissions );
-int accesslist_is_blessed( ot_ip6 ip, ot_permissions permissions );
+int accesslist_bless_net(ot_net *net, ot_permissions permissions);
+int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions);

 #endif
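address_in_net(), declared in the header above, does a CIDR-style prefix match: whole bytes are compared with memcmp(), then the remaining bits & 7 high bits of the next byte are compared under the mask (0x7f00 >> (bits & 7)), whose low byte keeps exactly the top bits & 7 bits; for bits = 12 the effective mask byte is 0xF0. A self-contained sketch of the same logic on plain byte arrays; prefix_match() and the sample addresses are hypothetical:

#include <stdio.h>
#include <string.h>

/* Same mask trick as address_in_net(), on plain byte arrays */
static int prefix_match(const unsigned char *a, const unsigned char *b, int bits) {
  int checkbits = 0x7f00 >> (bits & 7); /* low byte keeps the top (bits & 7) bits */
  int result = memcmp(a, b, bits >> 3); /* compare the whole bytes first */
  if (!result && (bits & 7))
    result = (checkbits & a[bits >> 3]) - (checkbits & b[bits >> 3]);
  return result == 0;
}

int main(void) {
  unsigned char net[2] = {0x0a, 0x10}; /* 10.16.0.0/12 as raw bytes */
  unsigned char in[2]  = {0x0a, 0x1f}; /* 10.31.x.x: inside the /12 */
  unsigned char out[2] = {0x0a, 0x20}; /* 10.32.x.x: outside the /12 */
  printf("%d %d\n", prefix_match(in, net, 12), prefix_match(out, net, 12)); /* prints: 1 0 */
  return 0;
}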
ot_clean.c (104 changed lines)

@@ -5,80 +5,79 @@

 /* System */
 #include <pthread.h>
-#include <unistd.h>
 #include <string.h>
+#include <unistd.h>

 /* Libowfat */
 #include "io.h"

 /* Opentracker */
-#include "trackerlogic.h"
-#include "ot_mutex.h"
-#include "ot_vector.h"
-#include "ot_clean.h"
-#include "ot_stats.h"
 #include "ot_accesslist.h"
+#include "ot_clean.h"
+#include "ot_mutex.h"
+#include "ot_stats.h"
+#include "ot_vector.h"
+#include "trackerlogic.h"

 /* Returns amount of removed peers */
-static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders ) {
+static ssize_t clean_single_bucket(ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders) {
   ot_peer *last_peer = peers + peer_count * peer_size, *insert_point;

   /* Two scan modes: unless there is one peer removed, just increase ot_peertime */
-  while( peers < last_peer ) {
-    time_t timediff = timedout + OT_PEERTIME( peers, peer_size );
-    if( timediff >= OT_PEER_TIMEOUT )
+  while (peers < last_peer) {
+    time_t timediff = timedout + OT_PEERTIME(peers, peer_size);
+    if (timediff >= OT_PEER_TIMEOUT)
       break;
-    OT_PEERTIME( peers, peer_size ) = timediff;
+    OT_PEERTIME(peers, peer_size) = timediff;
     peers += peer_size;
   }

   /* If we at least remove one peer, we have to copy */
-  for( insert_point = peers; peers < last_peer; peers += peer_size ) {
-    time_t timediff = timedout + OT_PEERTIME( peers, peer_size );
+  for (insert_point = peers; peers < last_peer; peers += peer_size) {
+    time_t timediff = timedout + OT_PEERTIME(peers, peer_size);

-    if( timediff < OT_PEER_TIMEOUT ) {
-      OT_PEERTIME( peers, peer_size ) = timediff;
-      memcpy( insert_point, peers, peer_size);
+    if (timediff < OT_PEER_TIMEOUT) {
+      OT_PEERTIME(peers, peer_size) = timediff;
+      memcpy(insert_point, peers, peer_size);
       insert_point += peer_size;
-    } else
-      if( OT_PEERFLAG_D( peers, peer_size ) & PEER_FLAG_SEEDING )
-        (*removed_seeders)++;
+    } else if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING)
+      (*removed_seeders)++;
   }

   return (peers - insert_point) / peer_size;
 }

-int clean_single_peer_list( ot_peerlist *peer_list, size_t peer_size ) {
+int clean_single_peer_list(ot_peerlist *peer_list, size_t peer_size) {
   ot_vector *peer_vector = &peer_list->peers;
-  time_t timedout = (time_t)( g_now_minutes - peer_list->base );
-  int num_buckets = 1, removed_seeders = 0;
+  time_t timedout = (time_t)(g_now_minutes - peer_list->base);
+  int num_buckets = 1, removed_seeders = 0;

   /* No need to clean empty torrent */
-  if( !timedout )
+  if (!timedout)
     return 0;

   /* Torrent has idled out */
-  if( timedout > OT_TORRENT_TIMEOUT )
+  if (timedout > OT_TORRENT_TIMEOUT)
     return 1;

   /* Nothing to be cleaned here? Test if torrent is worth keeping */
-  if( timedout > OT_PEER_TIMEOUT ) {
-    if( !peer_list->peer_count )
+  if (timedout > OT_PEER_TIMEOUT) {
+    if (!peer_list->peer_count)
       return peer_list->down_count ? 0 : 1;
     timedout = OT_PEER_TIMEOUT;
   }

-  if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
+  if (OT_PEERLIST_HASBUCKETS(peer_list)) {
     num_buckets = peer_vector->size;
     peer_vector = (ot_vector *)peer_vector->data;
   }

-  while( num_buckets-- ) {
-    size_t removed_peers = clean_single_bucket( peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders );
+  while (num_buckets--) {
+    size_t removed_peers = clean_single_bucket(peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders);
     peer_list->peer_count -= removed_peers;
     peer_vector->size -= removed_peers;
-    if( removed_peers )
-      vector_fixup_peers( peer_vector, peer_size );
+    if (removed_peers)
+      vector_fixup_peers(peer_vector, peer_size);

     /* Skip to next bucket, a vector containing peers */
     ++peer_vector;

@@ -87,10 +86,10 @@ int clean_single_peer_list( ot_peerlist *peer_list, size_t peer_size ) {
   peer_list->seed_count -= removed_seeders;

   /* See if we need to convert a torrent from simple vector to bucket list */
-  if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) )
-    vector_redistribute_buckets( peer_list, peer_size );
+  if ((peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT) || OT_PEERLIST_HASBUCKETS(peer_list))
+    vector_redistribute_buckets(peer_list, peer_size);

-  if( peer_list->peer_count )
+  if (peer_list->peer_count)
     peer_list->base = g_now_minutes;
   else {
     /* When we got here, the last time that torrent

@@ -103,34 +102,33 @@ int clean_single_peer_list( ot_peerlist *peer_list, size_t peer_size ) {
 /* Clean a single torrent
    return 1 if torrent timed out
 */
-int clean_single_torrent( ot_torrent *torrent ) {
-  return clean_single_peer_list( torrent->peer_list6, OT_PEER_SIZE6) *
-         clean_single_peer_list( torrent->peer_list4, OT_PEER_SIZE4);
+int clean_single_torrent(ot_torrent *torrent) {
+  return clean_single_peer_list(torrent->peer_list6, OT_PEER_SIZE6) * clean_single_peer_list(torrent->peer_list4, OT_PEER_SIZE4);
 }

 /* Clean up all peers in current bucket, remove timedout pools and
    torrents */
-static void * clean_worker( void * args ) {
-  (void) args;
-  while( 1 ) {
+static void *clean_worker(void *args) {
+  (void)args;
+  while (1) {
     int bucket = OT_BUCKET_COUNT;
-    while( bucket-- ) {
-      ot_vector *torrents_list = mutex_bucket_lock( bucket );
+    while (bucket--) {
+      ot_vector *torrents_list = mutex_bucket_lock(bucket);
       size_t toffs;
       int delta_torrentcount = 0;

-      for( toffs=0; toffs<torrents_list->size; ++toffs ) {
-        ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs;
-        if( clean_single_torrent( torrent ) ) {
-          vector_remove_torrent( torrents_list, torrent );
+      for (toffs = 0; toffs < torrents_list->size; ++toffs) {
+        ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + toffs;
+        if (clean_single_torrent(torrent)) {
+          vector_remove_torrent(torrents_list, torrent);
           --delta_torrentcount;
           --toffs;
         }
       }
-      mutex_bucket_unlock( bucket, delta_torrentcount );
-      if( !g_opentracker_running )
+      mutex_bucket_unlock(bucket, delta_torrentcount);
+      if (!g_opentracker_running)
         return NULL;
-      usleep( OT_CLEAN_SLEEP );
+      usleep(OT_CLEAN_SLEEP);
     }
     stats_cleanup();
 #ifdef WANT_ACCESSLIST

@@ -141,12 +139,8 @@ static void * clean_worker( void * args ) {
 }

 static pthread_t thread_id;
-void clean_init( void ) {
-  pthread_create( &thread_id, NULL, clean_worker, NULL );
-}
+void clean_init(void) { pthread_create(&thread_id, NULL, clean_worker, NULL); }

-void clean_deinit( void ) {
-  pthread_cancel( thread_id );
-}
+void clean_deinit(void) { pthread_cancel(thread_id); }

 const char *g_version_clean_c = "$Source$: $Revision$\n";
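clean_single_bucket() above deliberately runs in two phases: a first loop only ages timestamps in place, and only once the first expired peer is met does the second loop start memcpy()ing survivors down to insert_point, so buckets with nothing to drop are never copied. A simplified sketch of the same two-phase compaction on an int array; sweep() and its parameters are hypothetical, with cutoff standing in for OT_PEER_TIMEOUT:

#include <stddef.h>

/* Two-phase sweep, simplified: age entries in place, start compacting only
   at the first expired entry. Returns how many entries were removed. */
static size_t sweep(int *age, size_t count, int add, int cutoff) {
  size_t r, w = 0;

  /* Phase 1: nothing expired yet, just update in place (no copying) */
  while (w < count && age[w] + add < cutoff) {
    age[w] += add;
    ++w;
  }

  /* Phase 2: from the first expired entry on, copy survivors down */
  for (r = w; r < count; ++r)
    if (age[r] + add < cutoff)
      age[w++] = age[r] + add;

  return count - w;
}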
ot_clean.h (10 changed lines)

@@ -7,13 +7,13 @@
 #define OT_CLEAN_H__

 /* The amount of time a clean cycle should take */
 #define OT_CLEAN_INTERVAL_MINUTES 2

 /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */
-#define OT_CLEAN_SLEEP ( ( ( OT_CLEAN_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) )
+#define OT_CLEAN_SLEEP (((OT_CLEAN_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT))

-void clean_init( void );
-void clean_deinit( void );
-int clean_single_torrent( ot_torrent *torrent );
+void clean_init(void);
+void clean_deinit(void);
+int clean_single_torrent(ot_torrent *torrent);

 #endif
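For a sense of scale: OT_CLEAN_SLEEP spreads one clean cycle across all buckets. Assuming OT_BUCKET_COUNT is 1024 (its value is defined elsewhere, not in this diff), the macro evaluates to (2 * 60 * 1000000) / 1024, roughly 117,000 microseconds, so the clean worker sleeps about 117 ms after each bucket and one full pass over the buckets takes approximately the configured two minutes.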
285
ot_fullscrape.c
285
ot_fullscrape.c
@ -6,11 +6,11 @@
|
||||
#ifdef WANT_FULLSCRAPE
|
||||
|
||||
/* System */
|
||||
#include <sys/param.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <pthread.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <sys/param.h>
|
||||
#ifdef WANT_COMPRESSION_GZIP
|
||||
#include <zlib.h>
|
||||
#endif
|
||||
@ -21,46 +21,56 @@
|
||||
#include "textcode.h"
|
||||
|
||||
/* Opentracker */
|
||||
#include "trackerlogic.h"
|
||||
#include "ot_mutex.h"
|
||||
#include "ot_iovec.h"
|
||||
#include "ot_fullscrape.h"
|
||||
#include "ot_iovec.h"
|
||||
#include "ot_mutex.h"
|
||||
#include "trackerlogic.h"
|
||||
|
||||
/* Fetch full scrape info for all torrents
|
||||
Full scrapes usually are huge and one does not want to
|
||||
allocate more memory. So lets get them in 512k units
|
||||
*/
|
||||
#define OT_SCRAPE_CHUNK_SIZE (1024*1024)
|
||||
#define OT_SCRAPE_CHUNK_SIZE (1024 * 1024)
|
||||
|
||||
/* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */
|
||||
#define OT_SCRAPE_MAXENTRYLEN 256
|
||||
|
||||
/* Forward declaration */
|
||||
static void fullscrape_make( int taskid, ot_tasktype mode);
|
||||
static void fullscrape_make(int taskid, ot_tasktype mode);
|
||||
#ifdef WANT_COMPRESSION_GZIP
|
||||
static void fullscrape_make_gzip( int taskid, ot_tasktype mode);
|
||||
static void fullscrape_make_gzip(int taskid, ot_tasktype mode);
|
||||
#endif
|
||||
|
||||
/* Converter function from memory to human readable hex strings
|
||||
XXX - Duplicated from ot_stats. Needs fix. */
|
||||
static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;}
|
||||
static char *to_hex(char *d, uint8_t *s) {
|
||||
char *m = "0123456789ABCDEF";
|
||||
char *t = d;
|
||||
char *e = d + 40;
|
||||
while (d < e) {
|
||||
*d++ = m[*s >> 4];
|
||||
*d++ = m[*s++ & 15];
|
||||
}
|
||||
*d = 0;
|
||||
return t;
|
||||
}
|
||||
|
||||
/* This is the entry point into this worker thread
|
||||
It grabs tasks from mutex_tasklist and delivers results back
|
||||
*/
|
||||
static void * fullscrape_worker( void * args ) {
|
||||
(void) args;
|
||||
static void *fullscrape_worker(void *args) {
|
||||
(void)args;
|
||||
|
||||
while( g_opentracker_running ) {
|
||||
while (g_opentracker_running) {
|
||||
ot_tasktype tasktype = TASK_FULLSCRAPE;
|
||||
ot_taskid taskid = mutex_workqueue_poptask( &tasktype );
|
||||
ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
|
||||
#ifdef WANT_COMPRESSION_GZIP
|
||||
if (tasktype & TASK_FLAG_GZIP)
|
||||
fullscrape_make_gzip( taskid, tasktype );
|
||||
fullscrape_make_gzip(taskid, tasktype);
|
||||
else
|
||||
#endif
|
||||
fullscrape_make( taskid, tasktype );
|
||||
mutex_workqueue_pushchunked( taskid, NULL );
|
||||
fullscrape_make(taskid, tasktype);
|
||||
mutex_workqueue_pushchunked(taskid, NULL);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -83,80 +93,87 @@ static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_torrent *torre
|
||||
size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
|
||||
size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
|
||||
|
||||
switch( mode & TASK_TASK_MASK ) {
|
||||
case TASK_FULLSCRAPE:
|
||||
default:
|
||||
/* push hash as bencoded string */
|
||||
*r++='2'; *r++='0'; *r++=':';
|
||||
memcpy( r, hash, sizeof(ot_hash) ); r += sizeof(ot_hash);
|
||||
/* push rest of the scrape string */
|
||||
r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count-seed_count );
|
||||
switch (mode & TASK_TASK_MASK) {
|
||||
case TASK_FULLSCRAPE:
|
||||
default:
|
||||
/* push hash as bencoded string */
|
||||
*r++ = '2';
|
||||
*r++ = '0';
|
||||
*r++ = ':';
|
||||
memcpy(r, hash, sizeof(ot_hash));
|
||||
r += sizeof(ot_hash);
|
||||
/* push rest of the scrape string */
|
||||
r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count - seed_count);
|
||||
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_ASCII:
|
||||
to_hex( r, *hash ); r+= 2 * sizeof(ot_hash);
|
||||
r += sprintf( r, ":%zd:%zd\n", seed_count, peer_count-seed_count );
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_ASCII_PLUS:
|
||||
to_hex( r, *hash ); r+= 2 * sizeof(ot_hash);
|
||||
r += sprintf( r, ":%zd:%zd:%zd\n", seed_count, peer_count-seed_count, down_count );
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_BINARY:
|
||||
memcpy( r, *hash, sizeof(ot_hash) ); r += sizeof(ot_hash);
|
||||
*(uint32_t*)(r+0) = htonl( (uint32_t) seed_count );
|
||||
*(uint32_t*)(r+4) = htonl( (uint32_t)( peer_count-seed_count) );
|
||||
r+=8;
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_URLENCODED:
|
||||
r += fmt_urlencoded( r, (char *)*hash, 20 );
|
||||
r += sprintf( r, ":%zd:%zd\n", seed_count, peer_count-seed_count );
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TRACKERSTATE:
|
||||
to_hex( r, *hash ); r+= 2 * sizeof(ot_hash);
|
||||
r += sprintf( r, ":%zd:%zd\n", torrent->peer_list6->base, down_count );
|
||||
break;
|
||||
}
|
||||
return r;
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_ASCII:
|
||||
to_hex(r, *hash);
|
||||
r += 2 * sizeof(ot_hash);
|
||||
r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_ASCII_PLUS:
|
||||
to_hex(r, *hash);
|
||||
r += 2 * sizeof(ot_hash);
|
||||
r += sprintf(r, ":%zd:%zd:%zd\n", seed_count, peer_count - seed_count, down_count);
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_BINARY:
|
||||
memcpy(r, *hash, sizeof(ot_hash));
|
||||
r += sizeof(ot_hash);
|
||||
*(uint32_t *)(r + 0) = htonl((uint32_t)seed_count);
|
||||
*(uint32_t *)(r + 4) = htonl((uint32_t)(peer_count - seed_count));
|
||||
r += 8;
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TPB_URLENCODED:
|
||||
r += fmt_urlencoded(r, (char *)*hash, 20);
|
||||
r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
|
||||
break;
|
||||
case TASK_FULLSCRAPE_TRACKERSTATE:
|
||||
to_hex(r, *hash);
|
||||
r += 2 * sizeof(ot_hash);
|
||||
r += sprintf(r, ":%zd:%zd\n", torrent->peer_list6->base, down_count);
|
||||
break;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
static void fullscrape_make( int taskid, ot_tasktype mode ) {
|
||||
int bucket;
|
||||
char *r, *re;
|
||||
struct iovec iovector = { NULL, 0 };
|
||||
static void fullscrape_make(int taskid, ot_tasktype mode) {
|
||||
int bucket;
|
||||
char *r, *re;
|
||||
struct iovec iovector = {NULL, 0};
|
||||
|
||||
/* Setup return vector... */
|
||||
r = iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE );
|
||||
if( !r )
|
||||
r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
|
||||
if (!r)
|
||||
return;
|
||||
|
||||
/* re points to low watermark */
|
||||
re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
|
||||
|
||||
if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE )
|
||||
r += sprintf( r, "d5:filesd" );
|
||||
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
|
||||
r += sprintf(r, "d5:filesd");
|
||||
|
||||
/* For each bucket... */
|
||||
for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
|
||||
for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
|
||||
/* Get exclusive access to that bucket */
|
||||
ot_vector *torrents_list = mutex_bucket_lock( bucket );
|
||||
ot_torrent *torrents = (ot_torrent*)(torrents_list->data);
|
||||
size_t i;
|
||||
ot_vector *torrents_list = mutex_bucket_lock(bucket);
|
||||
ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
|
||||
size_t i;
|
||||
|
||||
/* For each torrent in this bucket.. */
|
||||
for( i=0; i<torrents_list->size; ++i ) {
|
||||
r = fullscrape_write_one( mode, r, torrents+i, &torrents[i].hash );
|
||||
for (i = 0; i < torrents_list->size; ++i) {
|
||||
r = fullscrape_write_one(mode, r, torrents + i, &torrents[i].hash);
|
||||
|
||||
if( r > re) {
|
||||
if (r > re) {
|
||||
iovector.iov_len = r - (char *)iovector.iov_base;
|
||||
|
||||
if (mutex_workqueue_pushchunked(taskid, &iovector) ) {
|
||||
if (mutex_workqueue_pushchunked(taskid, &iovector)) {
|
||||
free(iovector.iov_base);
|
||||
return mutex_bucket_unlock( bucket, 0 );
|
||||
return mutex_bucket_unlock(bucket, 0);
|
||||
}
|
||||
/* Allocate a fresh output buffer */
|
||||
r = iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE );
|
||||
if( !r )
|
||||
return mutex_bucket_unlock( bucket, 0 );
|
||||
r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
|
||||
if (!r)
|
||||
return mutex_bucket_unlock(bucket, 0);
|
||||
|
||||
/* re points to low watermark */
|
||||
re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
|
||||
@ -164,132 +181,132 @@ static void fullscrape_make( int taskid, ot_tasktype mode ) {
|
||||
}
|
||||
|
||||
/* All torrents done: release lock on current bucket */
|
||||
mutex_bucket_unlock( bucket, 0 );
|
||||
mutex_bucket_unlock(bucket, 0);
|
||||
|
||||
/* Parent thread died? */
|
||||
if( !g_opentracker_running )
|
||||
if (!g_opentracker_running)
|
||||
return;
|
||||
}
|
||||
|
||||
if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE )
|
||||
r += sprintf( r, "ee" );
|
||||
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
|
||||
r += sprintf(r, "ee");
|
||||
|
||||
/* Send rest of data */
|
||||
iovector.iov_len = r - (char *)iovector.iov_base;
|
||||
if( mutex_workqueue_pushchunked(taskid, &iovector) )
|
||||
if (mutex_workqueue_pushchunked(taskid, &iovector))
|
||||
free(iovector.iov_base);
|
||||
}
|
||||
|
||||
#ifdef WANT_COMPRESSION_GZIP
|
||||
|
||||
static void fullscrape_make_gzip( int taskid, ot_tasktype mode) {
|
||||
int bucket;
|
||||
char *r;
|
||||
struct iovec iovector = { NULL, 0 };
|
||||
int zres;
|
||||
z_stream strm;
|
||||
fprintf(stderr, "GZIP path\n");
|
||||
static void fullscrape_make_gzip(int taskid, ot_tasktype mode) {
|
||||
int bucket;
|
||||
char *r;
|
||||
struct iovec iovector = {NULL, 0};
|
||||
int zres;
|
||||
z_stream strm;
|
||||
fprintf(stderr, "GZIP path\n");
|
||||
/* Setup return vector... */
|
||||
iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE );
|
||||
if( !iovector.iov_base )
|
||||
iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
|
||||
if (!iovector.iov_base)
|
||||
return;
|
||||
|
||||
byte_zero( &strm, sizeof(strm) );
|
||||
strm.next_out = (uint8_t*)iovector.iov_base;
|
||||
byte_zero(&strm, sizeof(strm));
|
||||
strm.next_out = (uint8_t *)iovector.iov_base;
|
||||
strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
|
||||
if( deflateInit2(&strm,7,Z_DEFLATED,31,9,Z_DEFAULT_STRATEGY) != Z_OK )
|
||||
fprintf( stderr, "not ok.\n" );
|
||||
if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK)
|
||||
fprintf(stderr, "not ok.\n");
|
||||
|
||||
if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) {
|
||||
strm.next_in = (uint8_t*)"d5:filesd";
|
||||
strm.avail_in = strlen("d5:filesd");
|
||||
zres = deflate( &strm, Z_NO_FLUSH );
|
||||
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
|
||||
strm.next_in = (uint8_t *)"d5:filesd";
|
||||
strm.avail_in = strlen("d5:filesd");
|
||||
zres = deflate(&strm, Z_NO_FLUSH);
|
||||
}
|
||||
|
||||
/* For each bucket... */
|
||||
for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
|
||||
for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
|
||||
/* Get exclusive access to that bucket */
|
||||
ot_vector *torrents_list = mutex_bucket_lock( bucket );
|
||||
ot_torrent *torrents = (ot_torrent*)(torrents_list->data);
|
||||
size_t i;
|
||||
ot_vector *torrents_list = mutex_bucket_lock(bucket);
|
||||
ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
|
||||
size_t i;
|
||||
|
/* For each torrent in this bucket.. */
for( i=0; i<torrents_list->size; ++i ) {
for (i = 0; i < torrents_list->size; ++i) {
char compress_buffer[OT_SCRAPE_MAXENTRYLEN];
r = fullscrape_write_one( mode, compress_buffer, torrents+i, &torrents[i].hash );
strm.next_in = (uint8_t*)compress_buffer;
strm.avail_in = r - compress_buffer;
zres = deflate( &strm, Z_NO_FLUSH );
if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) )
fprintf( stderr, "deflate() failed while in fullscrape_make().\n" );
r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash);
strm.next_in = (uint8_t *)compress_buffer;
strm.avail_in = r - compress_buffer;
zres = deflate(&strm, Z_NO_FLUSH);
if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
fprintf(stderr, "deflate() failed while in fullscrape_make().\n");

/* Check if there still is enough buffer left */
while( !strm.avail_out ) {
while (!strm.avail_out) {
iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;

if (mutex_workqueue_pushchunked(taskid, &iovector) ) {
if (mutex_workqueue_pushchunked(taskid, &iovector)) {
free(iovector.iov_base);
return mutex_bucket_unlock( bucket, 0 );
return mutex_bucket_unlock(bucket, 0);
}
/* Allocate a fresh output buffer */
iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE );
if( !iovector.iov_base ) {
fprintf( stderr, "Out of memory trying to claim ouput buffer\n" );
iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
if (!iovector.iov_base) {
fprintf(stderr, "Out of memory trying to claim ouput buffer\n");
deflateEnd(&strm);
return mutex_bucket_unlock( bucket, 0 );
return mutex_bucket_unlock(bucket, 0);
}
strm.next_out = (uint8_t*)iovector.iov_base;
strm.next_out = (uint8_t *)iovector.iov_base;
strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
zres = deflate( &strm, Z_NO_FLUSH );
if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) )
fprintf( stderr, "deflate() failed while in fullscrape_make().\n" );
zres = deflate(&strm, Z_NO_FLUSH);
if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
fprintf(stderr, "deflate() failed while in fullscrape_make().\n");
}
}

/* All torrents done: release lock on current bucket */
mutex_bucket_unlock( bucket, 0 );
mutex_bucket_unlock(bucket, 0);

/* Parent thread died? */
if( !g_opentracker_running )
if (!g_opentracker_running)
return;
}

if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) {
strm.next_in = (uint8_t*)"ee";
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
strm.next_in = (uint8_t *)"ee";
strm.avail_in = strlen("ee");
}

if( deflate( &strm, Z_FINISH ) < Z_OK )
fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" );
if (deflate(&strm, Z_FINISH) < Z_OK)
fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");

iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
if (mutex_workqueue_pushchunked(taskid, &iovector) ) {
free(iovector.iov_base);
return mutex_bucket_unlock( bucket, 0 );
if (mutex_workqueue_pushchunked(taskid, &iovector)) {
free(iovector.iov_base);
return mutex_bucket_unlock(bucket, 0);
}

{
unsigned int pending;
int bits;
deflatePending( &strm, &pending, &bits);
pending += ( bits ? 1 : 0 );
int bits;
deflatePending(&strm, &pending, &bits);
pending += (bits ? 1 : 0);

if (pending) {
/* Allocate a fresh output buffer */
iovector.iov_base = malloc( pending );
iovector.iov_len = pending;
iovector.iov_base = malloc(pending);
iovector.iov_len = pending;

if( !iovector.iov_base ) {
fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" );
if (!iovector.iov_base) {
fprintf(stderr, "Problem with iovec_fix_increase_or_free\n");
deflateEnd(&strm);
return mutex_bucket_unlock( bucket, 0 );
return mutex_bucket_unlock(bucket, 0);
}
strm.next_out = iovector.iov_base;
strm.avail_out = pending;
if( deflate( &strm, Z_FINISH ) < Z_OK )
fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" );
if (deflate(&strm, Z_FINISH) < Z_OK)
fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");

if( mutex_workqueue_pushchunked(taskid, &iovector) )
if (mutex_workqueue_pushchunked(taskid, &iovector))
free(iovector.iov_base);
}
}
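The code above streams a gzip fullscrape in fixed-size chunks: it feeds entries with Z_NO_FLUSH, hands off each filled output buffer, and finishes with Z_FINISH, using deflatePending() to size the last buffer exactly. A minimal self-contained sketch of that pattern follows; the CHUNK size and emit() callback are illustrative, not opentracker API.

    /* Sketch: chunked gzip compression in the style of fullscrape_make_gzip().
       Assumes zlib; windowBits 31 = 15 + 16 requests a gzip header. */
    #include <string.h>
    #include <zlib.h>

    #define CHUNK 4096

    static int gzip_chunks(const char *in, size_t len, void (*emit)(const char *, size_t)) {
      z_stream strm;
      char out[CHUNK];
      memset(&strm, 0, sizeof(strm));
      if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK)
        return -1;
      strm.next_in = (Bytef *)in;
      strm.avail_in = (uInt)len;
      strm.next_out = (Bytef *)out;
      strm.avail_out = CHUNK;
      while (deflate(&strm, Z_NO_FLUSH) == Z_OK && !strm.avail_out) {
        emit(out, CHUNK); /* buffer full: hand off one chunk */
        strm.next_out = (Bytef *)out;
        strm.avail_out = CHUNK;
      }
      while (deflate(&strm, Z_FINISH) != Z_STREAM_END) {
        emit(out, CHUNK - strm.avail_out); /* drain the trailer in chunks, too */
        strm.next_out = (Bytef *)out;
        strm.avail_out = CHUNK;
      }
      emit(out, CHUNK - strm.avail_out);
      return deflateEnd(&strm);
    }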
@@ -8,9 +8,11 @@

#ifdef WANT_FULLSCRAPE

void fullscrape_init( );
void fullscrape_deinit( );
void fullscrape_deliver( int64 sock, ot_tasktype tasktype );
#include "ot_mutex.h"

void fullscrape_init();
void fullscrape_deinit();
void fullscrape_deliver(int64 sock, ot_tasktype tasktype);

#else

@@ -22,9 +22,9 @@ struct http_data {
STRUCT_HTTP_FLAG flag;
};

ssize_t http_handle_request( const int64 s, struct ot_workstruct *ws );
ssize_t http_sendiovecdata( const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial );
ssize_t http_issue_error( const int64 s, struct ot_workstruct *ws, int code );
ssize_t http_handle_request(const int64 s, struct ot_workstruct *ws);
ssize_t http_sendiovecdata(const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial);
ssize_t http_issue_error(const int64 s, struct ot_workstruct *ws, int code);

extern char *g_stats_path;
extern ssize_t g_stats_path_len;
71 ot_iovec.c
@@ -4,90 +4,89 @@
$id$ */

/* System */
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

/* Libowfat */

/* Opentracker */
#include "ot_iovec.h"

void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ) {
void *new_data;
int new_entries = 1 + *iovec_entries;
struct iovec *new_vec = realloc( *iovector, new_entries * sizeof( struct iovec ) );
void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc) {
void *new_data;
int new_entries = 1 + *iovec_entries;
struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec));

if( !new_vec )
if (!new_vec)
return NULL;

/* Only allocate after we have a place to store the pointer */
new_data = malloc( new_alloc );
if( !new_data )
new_data = malloc(new_alloc);
if (!new_data)
return NULL;

new_vec[new_entries - 1].iov_base = new_data;
new_vec[new_entries - 1].iov_len = new_alloc;

*iovector = new_vec;
*iovector = new_vec;
++*iovec_entries;
return new_data;
}

void *iovec_append( int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) {
int new_entries = *iovec_entries + 1;
struct iovec *new_vec = realloc( *iovector, new_entries * sizeof( struct iovec ) );
if( !new_vec )
void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) {
int new_entries = *iovec_entries + 1;
struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec));
if (!new_vec)
return NULL;

/* Take over data from appended iovec */
new_vec[*iovec_entries].iov_base = append_iovector->iov_base;
new_vec[*iovec_entries].iov_len = append_iovector->iov_len;

append_iovector->iov_base = NULL;
append_iovector->iov_len = 0;
append_iovector->iov_base = NULL;
append_iovector->iov_len = 0;

*iovector = new_vec;
*iovec_entries = new_entries;
*iovector = new_vec;
*iovec_entries = new_entries;

return new_vec;
}

void iovec_free( int *iovec_entries, struct iovec **iovector ) {
void iovec_free(int *iovec_entries, struct iovec **iovector) {
int i;
for( i=0; i<*iovec_entries; ++i )
free( ((*iovector)[i]).iov_base );
*iovector = NULL;
for (i = 0; i < *iovec_entries; ++i)
free(((*iovector)[i]).iov_base);
*iovector = NULL;
*iovec_entries = 0;
}

void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ) {
if( *iovec_entries ) {
char * base = (char*)((*iovector)[ *iovec_entries - 1 ]).iov_base;
size_t new_alloc = ((char*)last_ptr) - base;
void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr) {
if (*iovec_entries) {
char *base = (char *)((*iovector)[*iovec_entries - 1]).iov_base;
size_t new_alloc = ((char *)last_ptr) - base;

((*iovector)[*iovec_entries - 1 ]).iov_base = realloc( base, new_alloc );
((*iovector)[*iovec_entries - 1 ]).iov_len = new_alloc;
((*iovector)[*iovec_entries - 1]).iov_base = realloc(base, new_alloc);
((*iovector)[*iovec_entries - 1]).iov_len = new_alloc;
}
}

void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ) {
void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc) {
void *new_data;

iovec_fixlast( iovec_entries, iovector, last_ptr );
iovec_fixlast(iovec_entries, iovector, last_ptr);

if( !( new_data = iovec_increase( iovec_entries, iovector, new_alloc ) ) )
iovec_free( iovec_entries, iovector );
if (!(new_data = iovec_increase(iovec_entries, iovector, new_alloc)))
iovec_free(iovec_entries, iovector);

return new_data;
}

size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ) {
size_t iovec_length(const int *iovec_entries, const struct iovec **iovector) {
size_t length = 0;
int i;
for( i=0; i<*iovec_entries; ++i )
int i;
for (i = 0; i < *iovec_entries; ++i)
length += ((*iovector)[i]).iov_len;
return length;
}
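The helpers above implement a growable array of iovecs in which the last entry is over-allocated and later trimmed to the bytes actually written. A minimal usage sketch under those assumptions follows; the 64-byte chunk size and the payload are made up for illustration.

    /* Sketch of the intended call pattern: grow by one over-sized chunk,
       write into it, then shrink the last entry with iovec_fixlast(). */
    #include <string.h>
    #include <sys/uio.h>

    #include "ot_iovec.h"

    int build_reply(int *entries, struct iovec **vec) {
      char *cursor = iovec_increase(entries, vec, 64); /* fresh 64 byte chunk */
      if (!cursor)
        return -1;
      memcpy(cursor, "d5:filesd", 9);
      cursor += 9;
      /* ... when 64 bytes would overflow, iovec_fix_increase_or_free()
         trims the current entry and appends a new one ... */
      iovec_fixlast(entries, vec, cursor); /* last entry now spans 9 bytes */
      return 0;
    }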
12 ot_iovec.h
@@ -8,13 +8,13 @@

#include <sys/uio.h>

void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc );
void *iovec_append( int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector );
void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr );
void iovec_free( int *iovec_entries, struct iovec **iovector );
void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc);
void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector);
void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr);
void iovec_free(int *iovec_entries, struct iovec **iovector);

size_t iovec_length( const int *iovec_entries, const struct iovec **iovector );
size_t iovec_length(const int *iovec_entries, const struct iovec **iovector);

void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc );
void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc);

#endif
172 ot_livesync.c
@@ -4,126 +4,126 @@
$id$ */

/* System */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>

/* Libowfat */
#include "socket.h"
#include "ndelay.h"
#include "byte.h"
#include "ip6.h"
#include "ndelay.h"
#include "socket.h"

/* Opentracker */
#include "trackerlogic.h"
#include "ot_livesync.h"
#include "ot_accesslist.h"
#include "ot_stats.h"
#include "ot_livesync.h"
#include "ot_mutex.h"
#include "ot_stats.h"
#include "trackerlogic.h"

#ifdef WANT_SYNC_LIVE

char groupip_1[4] = { 224,0,23,5 };
char groupip_1[4] = {224, 0, 23, 5};

#define LIVESYNC_INCOMING_BUFFSIZE (256*256)
#define LIVESYNC_INCOMING_BUFFSIZE (256 * 256)

#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480
#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash))
#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480
#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash))

#define LIVESYNC_MAXDELAY 15 /* seconds */
#define LIVESYNC_MAXDELAY 15 /* seconds */

enum { OT_SYNC_PEER4, OT_SYNC_PEER6 };

/* Forward declaration */
static void * livesync_worker( void * args );
static void *livesync_worker(void *args);

/* For outgoing packets */
static int64 g_socket_in = -1;
static int64 g_socket_in = -1;

/* For incoming packets */
static int64 g_socket_out = -1;
static int64 g_socket_out = -1;

static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
size_t fill;
ot_time next_packet_time;
uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
size_t fill;
ot_time next_packet_time;
} sync_buffer;

static sync_buffer g_v6_buf;
static sync_buffer g_v4_buf;

static pthread_t thread_id;
void livesync_init( ) {
static pthread_t thread_id;
void livesync_init() {

if( g_socket_in == -1 )
exerr( "No socket address for live sync specified." );
if (g_socket_in == -1)
exerr("No socket address for live sync specified.");

/* Prepare outgoing peers buffer */
memcpy( g_v6_buf.data, &g_tracker_id, sizeof( g_tracker_id ) );
memcpy( g_v4_buf.data, &g_tracker_id, sizeof( g_tracker_id ) );
memcpy(g_v6_buf.data, &g_tracker_id, sizeof(g_tracker_id));
memcpy(g_v4_buf.data, &g_tracker_id, sizeof(g_tracker_id));

uint32_pack_big( (char*)g_v6_buf.data + sizeof( g_tracker_id ), OT_SYNC_PEER6);
uint32_pack_big( (char*)g_v4_buf.data + sizeof( g_tracker_id ), OT_SYNC_PEER4);
uint32_pack_big((char *)g_v6_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER6);
uint32_pack_big((char *)g_v4_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER4);

g_v6_buf.fill = sizeof( g_tracker_id ) + sizeof( uint32_t );
g_v4_buf.fill = sizeof( g_tracker_id ) + sizeof( uint32_t );
g_v6_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);
g_v4_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);

g_v6_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
g_v4_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;

pthread_create( &thread_id, NULL, livesync_worker, NULL );
pthread_create(&thread_id, NULL, livesync_worker, NULL);
}

void livesync_deinit() {
if( g_socket_in != -1 )
close( g_socket_in );
if( g_socket_out != -1 )
close( g_socket_out );
if (g_socket_in != -1)
close(g_socket_in);
if (g_socket_out != -1)
close(g_socket_out);

pthread_cancel( thread_id );
pthread_cancel(thread_id);
}

void livesync_bind_mcast( ot_ip6 ip, uint16_t port) {
char tmpip[4] = {0,0,0,0};
void livesync_bind_mcast(ot_ip6 ip, uint16_t port) {
char tmpip[4] = {0, 0, 0, 0};
char *v4ip;

if( !ip6_isv4mapped(ip))
if (!ip6_isv4mapped(ip))
exerr("v6 mcast support not yet available.");
v4ip = ip+12;
v4ip = ip + 12;

if( g_socket_in != -1 )
if (g_socket_in != -1)
exerr("Error: Livesync listen ip specified twice.");

if( ( g_socket_in = socket_udp4( )) < 0)
exerr("Error: Cant create live sync incoming socket." );
if ((g_socket_in = socket_udp4()) < 0)
exerr("Error: Cant create live sync incoming socket.");
ndelay_off(g_socket_in);

if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 )
exerr("Error: Cant bind live sync incoming socket." );
if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1)
exerr("Error: Cant bind live sync incoming socket.");

if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) )
if (socket_mcjoin4(g_socket_in, groupip_1, v4ip))
exerr("Error: Cant make live sync incoming socket join mcast group.");

if( ( g_socket_out = socket_udp4()) < 0)
exerr("Error: Cant create live sync outgoing socket." );
if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 )
exerr("Error: Cant bind live sync outgoing socket." );
if ((g_socket_out = socket_udp4()) < 0)
exerr("Error: Cant create live sync outgoing socket.");
if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1)
exerr("Error: Cant bind live sync outgoing socket.");

socket_mcttl4(g_socket_out, 1);
socket_mcloop4(g_socket_out, 0);
}

/* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */
static void livesync_issue_peersync( sync_buffer *buf ) {
static void livesync_issue_peersync(sync_buffer *buf) {
char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
size_t fill = buf->fill;

memcpy( mycopy, buf->data, fill );
buf->fill = sizeof( g_tracker_id ) + sizeof( uint32_t );
memcpy(mycopy, buf->data, fill);
buf->fill = sizeof(g_tracker_id) + sizeof(uint32_t);
buf->next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;

/* From now this thread has a local copy of the buffer and
@@ -133,101 +133,99 @@ static void livesync_issue_peersync( sync_buffer *buf ) {
socket_send4(g_socket_out, mycopy, fill, groupip_1, LIVESYNC_PORT);
}

static void livesync_handle_peersync( struct ot_workstruct *ws, size_t peer_size ) {
size_t off = sizeof( g_tracker_id ) + sizeof( uint32_t );
static void livesync_handle_peersync(struct ot_workstruct *ws, size_t peer_size) {
size_t off = sizeof(g_tracker_id) + sizeof(uint32_t);

/* Now basic sanity checks have been done on the live sync packet
We might add more testing and logging. */
while( (ssize_t)(off + sizeof( ot_hash ) + peer_size) <= ws->request_size ) {
memcpy( &ws->peer, ws->request + off + sizeof(ot_hash), peer_size );
ws->hash = (ot_hash*)(ws->request + off);
while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= ws->request_size) {
memcpy(&ws->peer, ws->request + off + sizeof(ot_hash), peer_size);
ws->hash = (ot_hash *)(ws->request + off);

if( !g_opentracker_running ) return;
if (!g_opentracker_running)
return;

if( OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED )
remove_peer_from_torrent( FLAG_MCA, ws );
if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED)
remove_peer_from_torrent(FLAG_MCA, ws);
else
add_peer_to_torrent_and_return_peers( FLAG_MCA, ws, /* amount = */ 0 );
add_peer_to_torrent_and_return_peers(FLAG_MCA, ws, /* amount = */ 0);

off += sizeof( ot_hash ) + peer_size;
off += sizeof(ot_hash) + peer_size;
}

stats_issue_event(EVENT_SYNC, 0,
(ws->request_size - sizeof( g_tracker_id ) - sizeof( uint32_t ) ) /
((ssize_t)sizeof( ot_hash ) + peer_size));
stats_issue_event(EVENT_SYNC, 0, (ws->request_size - sizeof(g_tracker_id) - sizeof(uint32_t)) / ((ssize_t)sizeof(ot_hash) + peer_size));
}

/* Tickle the live sync module from time to time, so no events get
stuck when there's not enough traffic to fill udp packets fast
enough */
void livesync_ticker( ) {
void livesync_ticker() {
/* livesync_issue_peersync sets g_next_packet_time */
pthread_mutex_lock(&g_outbuf_mutex);
if( g_now_seconds > g_v6_buf.next_packet_time &&
g_v6_buf.fill > sizeof( g_tracker_id ) + sizeof( uint32_t ) )
if (g_now_seconds > g_v6_buf.next_packet_time && g_v6_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
livesync_issue_peersync(&g_v6_buf);
else
pthread_mutex_unlock(&g_outbuf_mutex);

pthread_mutex_lock(&g_outbuf_mutex);
if( g_now_seconds > g_v4_buf.next_packet_time &&
g_v4_buf.fill > sizeof( g_tracker_id ) + sizeof( uint32_t ) )
if (g_now_seconds > g_v4_buf.next_packet_time && g_v4_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
livesync_issue_peersync(&g_v4_buf);
else
pthread_mutex_unlock(&g_outbuf_mutex);
}

/* Inform live sync about whats going on. */
void livesync_tell( struct ot_workstruct *ws ) {
void livesync_tell(struct ot_workstruct *ws) {
size_t peer_size; /* initialized in next line */
ot_peer *peer_src = peer_from_peer6(&ws->peer, &peer_size);
sync_buffer *dest_buf = peer_size == OT_PEER_SIZE6 ? &g_v6_buf : &g_v4_buf;

pthread_mutex_lock(&g_outbuf_mutex);

memcpy( dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash) );
memcpy(dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash));
dest_buf->fill += sizeof(ot_hash);

memcpy( dest_buf->data + dest_buf->fill, peer_src, peer_size );
memcpy(dest_buf->data + dest_buf->fill, peer_src, peer_size);
dest_buf->fill += peer_size;

if( dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS )
if (dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS)
livesync_issue_peersync(dest_buf);
else
pthread_mutex_unlock(&g_outbuf_mutex);
}

static void * livesync_worker( void * args ) {
static void *livesync_worker(void *args) {
struct ot_workstruct ws;
ot_ip6 in_ip; uint16_t in_port;
ot_ip6 in_ip;
uint16_t in_port;

(void)args;

/* Initialize our "thread local storage" */
ws.inbuf = ws.request = malloc( LIVESYNC_INCOMING_BUFFSIZE );
ws.outbuf = ws.reply = 0;
ws.inbuf = ws.request = malloc(LIVESYNC_INCOMING_BUFFSIZE);
ws.outbuf = ws.reply = 0;

memcpy( in_ip, V4mappedprefix, sizeof( V4mappedprefix ) );
memcpy(in_ip, V4mappedprefix, sizeof(V4mappedprefix));

while( 1 ) {
ws.request_size = socket_recv4(g_socket_in, (char*)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port);
while (1) {
ws.request_size = socket_recv4(g_socket_in, (char *)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port);

/* Expect at least tracker id and packet type */
if( ws.request_size <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) )
if (ws.request_size <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t)))
continue;
if( !accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC))
if (!accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC))
continue;
if( !memcmp( ws.inbuf, &g_tracker_id, sizeof( g_tracker_id ) ) ) {
if (!memcmp(ws.inbuf, &g_tracker_id, sizeof(g_tracker_id))) {
/* TODO: log packet coming from ourselves */
continue;
}

switch( uint32_read_big( sizeof( g_tracker_id ) + (char *)ws.inbuf ) ) {
switch (uint32_read_big(sizeof(g_tracker_id) + (char *)ws.inbuf)) {
case OT_SYNC_PEER6:
livesync_handle_peersync( &ws, OT_PEER_SIZE6 );
livesync_handle_peersync(&ws, OT_PEER_SIZE6);
break;
case OT_SYNC_PEER4:
livesync_handle_peersync( &ws, OT_PEER_SIZE4 );
livesync_handle_peersync(&ws, OT_PEER_SIZE4);
break;
default:
break;
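The sizeof() arithmetic in livesync_handle_peersync() and livesync_tell() above implies a flat wire format: a tracker id, a 32-bit big-endian packet type, then repeated (info_hash, peer) records until the datagram is exhausted. A sketch of that layout, assuming a 32-bit tracker id and a 20-byte SHA1 info_hash; the helper name is made up.

    /* packet: [ tracker_id (uint32) ][ type (uint32, big endian) ]
               [ info_hash ][ peer ] [ info_hash ][ peer ] ... */
    #include <stddef.h>
    #include <stdint.h>

    #define OT_HASH_LEN 20 /* assumption: SHA1 info_hash */

    /* How many (hash, peer) records fit a packet of request_size bytes. */
    size_t livesync_record_count(size_t request_size, size_t peer_size) {
      size_t header = sizeof(uint32_t) + sizeof(uint32_t); /* id + type */
      return (request_size - header) / (OT_HASH_LEN + peer_size);
    }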
@@ -51,18 +51,18 @@ void livesync_init();
void livesync_deinit();

/* Join multicast group for listening and create sending socket */
void livesync_bind_mcast( char *ip, uint16_t port );
void livesync_bind_mcast(char *ip, uint16_t port);

/* Inform live sync about whats going on. */
void livesync_tell( struct ot_workstruct *ws );
void livesync_tell(struct ot_workstruct *ws);

/* Tickle the live sync module from time to time, so no events get
stuck when there's not enough traffic to fill udp packets fast
enough */
void livesync_ticker( );
void livesync_ticker();

/* Handle an incoming live sync packet */
void handle_livesync( const int64 sock );
void handle_livesync(const int64 sock);

#else

165 ot_mutex.c
@@ -16,43 +16,39 @@
#include "uint32.h"

/* Opentracker */
#include "trackerlogic.h"
#include "ot_iovec.h"
#include "ot_mutex.h"
#include "ot_stats.h"
#include "trackerlogic.h"

/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */
#define MTX_DBG( STRING )
#define MTX_DBG(STRING)

/* Our global all torrents list */
static ot_vector all_torrents[OT_BUCKET_COUNT];
static ot_vector all_torrents[OT_BUCKET_COUNT];
static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT];
static size_t g_torrent_count;
static size_t g_torrent_count;

/* Self pipe from opentracker.c */
extern int g_self_pipe[2];
extern int g_self_pipe[2];

ot_vector *mutex_bucket_lock( int bucket ) {
pthread_mutex_lock(bucket_mutex + bucket );
ot_vector *mutex_bucket_lock(int bucket) {
pthread_mutex_lock(bucket_mutex + bucket);
return all_torrents + bucket;
}

ot_vector *mutex_bucket_lock_by_hash( ot_hash const hash ) {
return mutex_bucket_lock( uint32_read_big( (const char*)hash ) >> OT_BUCKET_COUNT_SHIFT );
}
ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash) { return mutex_bucket_lock(uint32_read_big((const char *)hash) >> OT_BUCKET_COUNT_SHIFT); }

void mutex_bucket_unlock( int bucket, int delta_torrentcount ) {
void mutex_bucket_unlock(int bucket, int delta_torrentcount) {
pthread_mutex_unlock(bucket_mutex + bucket);
g_torrent_count += delta_torrentcount;
}

void mutex_bucket_unlock_by_hash( ot_hash const hash, int delta_torrentcount ) {
mutex_bucket_unlock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount );
void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount) {
mutex_bucket_unlock(uint32_read_big((char *)hash) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount);
}

size_t mutex_get_torrent_count( ) {
return g_torrent_count;
}
size_t mutex_get_torrent_count() { return g_torrent_count; }

/* TaskQueue Magic */

@@ -65,16 +61,16 @@ struct ot_task {
struct ot_task *next;
};

static ot_taskid next_free_taskid = 1;
static ot_taskid next_free_taskid = 1;
static struct ot_task *tasklist;
static pthread_mutex_t tasklist_mutex;
static pthread_cond_t tasklist_being_filled;
static pthread_cond_t tasklist_being_filled;

int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
struct ot_task ** tmptask, * task;
int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype) {
struct ot_task **tmptask, *task;

task = malloc(sizeof( struct ot_task));
if( !task )
task = malloc(sizeof(struct ot_task));
if (!task)
return -1;

task->taskid = 0;
@@ -85,98 +81,98 @@ int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
task->next = 0;

/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);

/* Skip to end of list */
tmptask = &tasklist;
while( *tmptask )
while (*tmptask)
tmptask = &(*tmptask)->next;
*tmptask = task;

/* Inform waiting workers and release lock */
pthread_cond_broadcast( &tasklist_being_filled );
pthread_mutex_unlock( &tasklist_mutex );
pthread_cond_broadcast(&tasklist_being_filled);
pthread_mutex_unlock(&tasklist_mutex);
return 0;
}

void mutex_workqueue_canceltask( int64 sock ) {
struct ot_task ** task;
void mutex_workqueue_canceltask(int64 sock) {
struct ot_task **task;

/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);

for (task = &tasklist; *task; task = &((*task)->next))
if ((*task)->sock == sock) {
struct iovec *iovec = (*task)->iovec;
struct iovec *iovec = (*task)->iovec;
struct ot_task *ptask = *task;
int i;
int i;

/* Free task's iovec */
for( i=0; i<(*task)->iovec_entries; ++i )
free( iovec[i].iov_base );
for (i = 0; i < (*task)->iovec_entries; ++i)
free(iovec[i].iov_base);

*task = (*task)->next;
free( ptask );
free(ptask);
break;
}

/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
}

ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ) {
struct ot_task * task;
ot_taskid taskid = 0;
ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype) {
struct ot_task *task;
ot_taskid taskid = 0;

/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);

while( !taskid ) {
while (!taskid) {
/* Skip to the first unassigned task this worker wants to do */
for (task = tasklist; task; task = task->next)
if (!task->taskid && ( TASK_CLASS_MASK & task->tasktype ) == *tasktype) {
if (!task->taskid && (TASK_CLASS_MASK & task->tasktype) == *tasktype) {
/* If we found an outstanding task, assign a taskid to it
and leave the loop */
task->taskid = taskid = ++next_free_taskid;
*tasktype = task->tasktype;
*tasktype = task->tasktype;
break;
}

/* Wait until the next task is being fed */
if (!taskid)
pthread_cond_wait( &tasklist_being_filled, &tasklist_mutex );
pthread_cond_wait(&tasklist_being_filled, &tasklist_mutex);
}

/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);

return taskid;
}

void mutex_workqueue_pushsuccess( ot_taskid taskid ) {
struct ot_task ** task;
void mutex_workqueue_pushsuccess(ot_taskid taskid) {
struct ot_task **task;

/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);

for (task = &tasklist; *task; task = &((*task)->next))
if ((*task)->taskid == taskid) {
struct ot_task *ptask = *task;
*task = (*task)->next;
free( ptask );
free(ptask);
break;
}

/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
}

int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovec ) {
struct ot_task * task;
const char byte = 'o';
int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovec) {
struct ot_task *task;
const char byte = 'o';

/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);

for (task = tasklist; task; task = task->next)
if (task->taskid == taskid) {
@@ -187,25 +183,25 @@ int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iove
}

/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);

io_trywrite( g_self_pipe[1], &byte, 1 );
io_trywrite(g_self_pipe[1], &byte, 1);

/* Indicate whether the worker has to throw away results */
return task ? 0 : -1;
}

int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
struct ot_task * task;
const char byte = 'o';
struct ot_task *task;
const char byte = 'o';

/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);

for (task = tasklist; task; task = task->next)
if (task->taskid == taskid) {
if( iovec ) {
if (iovec_append(&task->iovec_entries, &task->iovec, iovec) )
if (iovec) {
if (iovec_append(&task->iovec_entries, &task->iovec, iovec))
task->tasktype = TASK_DONE_PARTIAL;
else
task = NULL;
@@ -215,65 +211,64 @@ int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
}

/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);

io_trywrite( g_self_pipe[1], &byte, 1 );
io_trywrite(g_self_pipe[1], &byte, 1);

/* Indicate whether the worker has to throw away results */
return task ? 0 : -1;
}

int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec, int *is_partial ) {
struct ot_task ** task;
int64 sock = -1;
int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovec, int *is_partial) {
struct ot_task **task;
int64 sock = -1;

*is_partial = 0;

/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);

for (task = &tasklist; *task; task = &((*task)->next))
if (((*task)->tasktype & TASK_CLASS_MASK ) == TASK_DONE) {
if (((*task)->tasktype & TASK_CLASS_MASK) == TASK_DONE) {
struct ot_task *ptask = *task;
*iovec_entries = ptask->iovec_entries;
*iovec = ptask->iovec;
sock = ptask->sock;
*iovec_entries = ptask->iovec_entries;
*iovec = ptask->iovec;
sock = ptask->sock;

if ((*task)->tasktype == TASK_DONE) {
*task = ptask->next;
free( ptask );
free(ptask);
} else {
ptask->iovec_entries = 0;
ptask->iovec = NULL;
*is_partial = 1;
ptask->iovec = NULL;
*is_partial = 1;
/* Prevent task from showing up immediately again unless new data was added */
(*task)->tasktype = TASK_FULLSCRAPE;
(*task)->tasktype = TASK_FULLSCRAPE;
}
break;
}

/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
return sock;
}

void mutex_init( ) {
void mutex_init() {
int i;
pthread_mutex_init(&tasklist_mutex, NULL);
pthread_cond_init (&tasklist_being_filled, NULL);
for (i=0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_init(bucket_mutex + i, NULL);
byte_zero( all_torrents, sizeof( all_torrents ) );
pthread_cond_init(&tasklist_being_filled, NULL);
for (i = 0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_init(bucket_mutex + i, NULL);
byte_zero(all_torrents, sizeof(all_torrents));
}

void mutex_deinit( ) {
void mutex_deinit() {
int i;
for (i=0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_destroy(bucket_mutex + i);
for (i = 0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_destroy(bucket_mutex + i);
pthread_mutex_destroy(&tasklist_mutex);
pthread_cond_destroy(&tasklist_being_filled);
byte_zero( all_torrents, sizeof( all_torrents ) );
byte_zero(all_torrents, sizeof(all_torrents));
}

const char *g_version_mutex_c = "$Source$: $Revision$\n";
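The workqueue above pairs producer threads (pushtask from the network loop) with consumer threads that block in poptask and report back via pushresult or pushchunked. A minimal worker sketch under those assumptions; process details are stand-ins, and the convention that a NULL iovec marks the final chunk is inferred from the pushchunked/popresult code above, not shown in this hunk.

    /* Sketch of a worker consuming the task queue declared above. */
    #include <stdlib.h>
    #include <sys/uio.h>

    #include "ot_mutex.h"

    static void *fullscrape_worker(void *args) {
      (void)args;
      for (;;) {
        ot_tasktype tasktype = TASK_FULLSCRAPE; /* task class we volunteer for */
        ot_taskid   taskid   = mutex_workqueue_poptask(&tasktype); /* blocks */
        struct iovec iovec;
        iovec.iov_base = malloc(4096); /* made-up chunk size */
        iovec.iov_len  = 4096;
        if (!iovec.iov_base)
          continue;
        /* ... render one chunk of the reply into iovec.iov_base ... */
        if (mutex_workqueue_pushchunked(taskid, &iovec)) /* non-zero: task gone */
          free(iovec.iov_base);
        mutex_workqueue_pushchunked(taskid, NULL); /* assumed end-of-task marker */
      }
      return NULL;
    }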
103 ot_mutex.h
@@ -7,72 +7,73 @@
#define OT_MUTEX_H__

#include <sys/uio.h>
#include "trackerlogic.h"

void mutex_init( void );
void mutex_deinit( void );
void mutex_init(void);
void mutex_deinit(void);

ot_vector *mutex_bucket_lock( int bucket );
ot_vector *mutex_bucket_lock_by_hash( ot_hash const hash );
ot_vector *mutex_bucket_lock(int bucket);
ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash);

void mutex_bucket_unlock( int bucket, int delta_torrentcount );
void mutex_bucket_unlock_by_hash( ot_hash const hash, int delta_torrentcount );
void mutex_bucket_unlock(int bucket, int delta_torrentcount);
void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount);

size_t mutex_get_torrent_count(void);
size_t mutex_get_torrent_count(void);

typedef enum {
TASK_STATS_CONNS = 0x0001,
TASK_STATS_TCP = 0x0002,
TASK_STATS_UDP = 0x0003,
TASK_STATS_SCRAPE = 0x0004,
TASK_STATS_FULLSCRAPE = 0x0005,
TASK_STATS_TPB = 0x0006,
TASK_STATS_HTTPERRORS = 0x0007,
TASK_STATS_VERSION = 0x0008,
TASK_STATS_BUSY_NETWORKS = 0x0009,
TASK_STATS_RENEW = 0x000a,
TASK_STATS_SYNCS = 0x000b,
TASK_STATS_COMPLETED = 0x000c,
TASK_STATS_NUMWANTS = 0x000d,
TASK_STATS_CONNS = 0x0001,
TASK_STATS_TCP = 0x0002,
TASK_STATS_UDP = 0x0003,
TASK_STATS_SCRAPE = 0x0004,
TASK_STATS_FULLSCRAPE = 0x0005,
TASK_STATS_TPB = 0x0006,
TASK_STATS_HTTPERRORS = 0x0007,
TASK_STATS_VERSION = 0x0008,
TASK_STATS_BUSY_NETWORKS = 0x0009,
TASK_STATS_RENEW = 0x000a,
TASK_STATS_SYNCS = 0x000b,
TASK_STATS_COMPLETED = 0x000c,
TASK_STATS_NUMWANTS = 0x000d,

TASK_STATS = 0x0100, /* Mask */
TASK_STATS_TORRENTS = 0x0101,
TASK_STATS_PEERS = 0x0102,
TASK_STATS_SLASH24S = 0x0103,
TASK_STATS_TOP10 = 0x0104,
TASK_STATS_TOP100 = 0x0105,
TASK_STATS_EVERYTHING = 0x0106,
TASK_STATS_FULLLOG = 0x0107,
TASK_STATS_WOODPECKERS = 0x0108,
TASK_STATS = 0x0100, /* Mask */
TASK_STATS_TORRENTS = 0x0101,
TASK_STATS_PEERS = 0x0102,
TASK_STATS_SLASH24S = 0x0103,
TASK_STATS_TOP10 = 0x0104,
TASK_STATS_TOP100 = 0x0105,
TASK_STATS_EVERYTHING = 0x0106,
TASK_STATS_FULLLOG = 0x0107,
TASK_STATS_WOODPECKERS = 0x0108,

TASK_FULLSCRAPE = 0x0200, /* Default mode */
TASK_FULLSCRAPE_TPB_BINARY = 0x0201,
TASK_FULLSCRAPE_TPB_ASCII = 0x0202,
TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203,
TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204,
TASK_FULLSCRAPE_TRACKERSTATE = 0x0205,
TASK_FULLSCRAPE = 0x0200, /* Default mode */
TASK_FULLSCRAPE_TPB_BINARY = 0x0201,
TASK_FULLSCRAPE_TPB_ASCII = 0x0202,
TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203,
TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204,
TASK_FULLSCRAPE_TRACKERSTATE = 0x0205,

TASK_DMEM = 0x0300,
TASK_DMEM = 0x0300,

TASK_DONE = 0x0f00,
TASK_DONE_PARTIAL = 0x0f01,
TASK_DONE = 0x0f00,
TASK_DONE_PARTIAL = 0x0f01,

TASK_FLAG_GZIP = 0x1000,
TASK_FLAG_BZIP2 = 0x2000,
TASK_FLAG_CHUNKED = 0x4000,
TASK_FLAG_GZIP = 0x1000,
TASK_FLAG_BZIP2 = 0x2000,
TASK_FLAG_CHUNKED = 0x4000,

TASK_TASK_MASK = 0x0fff,
TASK_CLASS_MASK = 0x0f00,
TASK_FLAGS_MASK = 0xf000
TASK_TASK_MASK = 0x0fff,
TASK_CLASS_MASK = 0x0f00,
TASK_FLAGS_MASK = 0xf000
} ot_tasktype;

typedef unsigned long ot_taskid;

int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype );
void mutex_workqueue_canceltask( int64 sock );
void mutex_workqueue_pushsuccess( ot_taskid taskid );
ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype );
int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector );
int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec);
int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovector, int *is_partial );
int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype);
void mutex_workqueue_canceltask(int64 sock);
void mutex_workqueue_pushsuccess(ot_taskid taskid);
ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype);
int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovector);
int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec);
int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovector, int *is_partial);

#endif
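The ot_tasktype values above pack three fields into one integer: the exact task in the low twelve bits, its class in the second byte, and compression/transfer flags in the top nibble. A small sketch of how those masks compose, using only the constants declared above:

    /* Flags, class and task id share one ot_tasktype value. */
    #include <assert.h>

    #include "ot_mutex.h"

    int main(void) {
      ot_tasktype t = TASK_FULLSCRAPE | TASK_FLAG_GZIP;  /* 0x1200 */
      assert((t & TASK_CLASS_MASK) == TASK_FULLSCRAPE);  /* workers dispatch on class */
      assert((t & TASK_TASK_MASK)  == TASK_FULLSCRAPE);  /* exact task, flags stripped */
      assert((t & TASK_FLAGS_MASK) == TASK_FLAG_GZIP);   /* compression flag survives */
      return 0;
    }

This is why fullscrape_make() above can test (mode & TASK_TASK_MASK) == TASK_FULLSCRAPE while the gzip flag rides along in the same value.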
953 ot_stats.c
File diff suppressed because it is too large

20 ot_stats.h
@@ -6,10 +6,12 @@
#ifndef OT_STATS_H__
#define OT_STATS_H__

#include "trackerlogic.h"

typedef enum {
EVENT_ACCEPT,
EVENT_READ,
EVENT_CONNECT, /* UDP only */
EVENT_CONNECT, /* UDP only */
EVENT_ANNOUNCE,
EVENT_COMPLETED,
EVENT_RENEW,
@@ -17,7 +19,7 @@ typedef enum {
EVENT_SCRAPE,
EVENT_FULLSCRAPE_REQUEST,
EVENT_FULLSCRAPE_REQUEST_GZIP,
EVENT_FULLSCRAPE, /* TCP only */
EVENT_FULLSCRAPE, /* TCP only */
EVENT_FAILED,
EVENT_BUCKET_LOCKED,
EVENT_WOODPECKER,
@@ -38,13 +40,13 @@ enum {
CODE_HTTPERROR_COUNT
};

void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data );
void stats_deliver( int64 sock, int tasktype );
void stats_cleanup( void );
size_t return_stats_for_tracker( char *reply, int mode, int format );
size_t stats_return_tracker_version( char *reply );
void stats_init( void );
void stats_deinit( void );
void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data);
void stats_deliver(int64 sock, int tasktype);
void stats_cleanup(void);
size_t return_stats_for_tracker(char *reply, int mode, int format);
size_t stats_return_tracker_version(char *reply);
void stats_init(void);
void stats_deinit(void);

extern const char *g_version_rijndael_c;
extern const char *g_version_livesync_c;
263 ot_udp.c
@@ -4,31 +4,31 @@
$id$ */

/* System */
#include <stdlib.h>
#include <pthread.h>
#include <string.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Libowfat */
#include "socket.h"
#include "io.h"
#include "ip6.h"
#include "socket.h"

/* Opentracker */
#include "trackerlogic.h"
#include "ot_udp.h"
#include "ot_stats.h"
#include "ot_rijndael.h"
#include "ot_stats.h"
#include "ot_udp.h"
#include "trackerlogic.h"

#if 0
static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff };
#endif
static uint32_t g_rijndael_round_key[44] = {0};
static uint32_t g_key_of_the_hour[2] = {0};
static uint32_t g_key_of_the_hour[2] = {0};
static ot_time g_hour_of_the_key;

static void udp_generate_rijndael_round_key() {
static void udp_generate_rijndael_round_key() {
uint32_t key[16];
#ifdef WANT_ARC4RANDOM
arc4random_buf(&key[0], sizeof(key));
@@ -38,7 +38,7 @@ static void udp_generate_rijndael_round_key() {
key[2] = random();
key[3] = random();
#endif
rijndaelKeySetupEnc128( g_rijndael_round_key, (uint8_t*)key );
rijndaelKeySetupEnc128(g_rijndael_round_key, (uint8_t *)key);

#ifdef WANT_ARC4RANDOM
g_key_of_the_hour[0] = arc4random();
@@ -49,181 +49,190 @@ static void udp_generate_rijndael_round_key() {
}

/* Generate current and previous connection id for ip */
static void udp_make_connectionid( uint32_t connid[2], const ot_ip6 remoteip, int age ) {
static void udp_make_connectionid(uint32_t connid[2], const ot_ip6 remoteip, int age) {
uint32_t plain[4], crypt[4];
int i;
if( g_now_minutes + 60 > g_hour_of_the_key ) {
g_hour_of_the_key = g_now_minutes;
int i;
if (g_now_minutes + 60 > g_hour_of_the_key) {
g_hour_of_the_key = g_now_minutes;
g_key_of_the_hour[1] = g_key_of_the_hour[0];
#ifdef WANT_ARC4RANDOM
g_key_of_the_hour[0] = arc4random();
g_key_of_the_hour[0] = arc4random();
#else
g_key_of_the_hour[0] = random();
g_key_of_the_hour[0] = random();
#endif
}

memcpy( plain, remoteip, sizeof( plain ) );
for( i=0; i<4; ++i ) plain[i] ^= g_key_of_the_hour[age];
rijndaelEncrypt128( g_rijndael_round_key, (uint8_t*)remoteip, (uint8_t*)crypt );
memcpy(plain, remoteip, sizeof(plain));
for (i = 0; i < 4; ++i)
plain[i] ^= g_key_of_the_hour[age];
rijndaelEncrypt128(g_rijndael_round_key, (uint8_t *)remoteip, (uint8_t *)crypt);
connid[0] = crypt[0] ^ crypt[1];
connid[1] = crypt[2] ^ crypt[3];
}

/* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */
int handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
ot_ip6 remoteip;
uint32_t *inpacket = (uint32_t*)ws->inbuf;
uint32_t *outpacket = (uint32_t*)ws->outbuf;
uint32_t left, event, scopeid;
uint32_t connid[2];
uint32_t action;
uint16_t port, remoteport;
size_t byte_count, scrape_count;
int handle_udp6(int64 serversocket, struct ot_workstruct *ws) {
ot_ip6 remoteip;
uint32_t *inpacket = (uint32_t *)ws->inbuf;
uint32_t *outpacket = (uint32_t *)ws->outbuf;
uint32_t left, event, scopeid;
uint32_t connid[2];
uint32_t action;
uint16_t port, remoteport;
size_t byte_count, scrape_count;

byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid );
if( !byte_count ) return 0;
byte_count = socket_recv6(serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid);
if (!byte_count)
return 0;

stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip );
stats_issue_event( EVENT_READ, FLAG_UDP, byte_count );
stats_issue_event(EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip);
stats_issue_event(EVENT_READ, FLAG_UDP, byte_count);

/* Minimum udp tracker packet size, also catches error */
if( byte_count < 16 )
if (byte_count < 16)
return 1;

/* Get action to take. Ignore error messages and broken packets */
action = ntohl( inpacket[2] );
if( action > 2 )
action = ntohl(inpacket[2]);
if (action > 2)
return 1;

/* Generate the connection id we give out and expect to and from
the requesting ip address, this prevents udp spoofing */
udp_make_connectionid( connid, remoteip, 0 );
udp_make_connectionid(connid, remoteip, 0);

/* Initialise hash pointer */
ws->hash = NULL;
ws->hash = NULL;
ws->peer_id = NULL;

/* If action is not 0 (connect), then we expect the derived
connection id in first 64 bit */
if( ( action > 0 ) && ( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) ) {
if ((action > 0) && (inpacket[0] != connid[0] || inpacket[1] != connid[1])) {
/* If connection id does not match, try the one that was
valid in the previous hour. Only if this also does not
match, return an error packet */
udp_make_connectionid( connid, remoteip, 1 );
if( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) {
const size_t s = sizeof( "Connection ID missmatch." );
outpacket[0] = htonl( 3 ); outpacket[1] = inpacket[3];
memcpy( &outpacket[2], "Connection ID missmatch.", s );
socket_send6( serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0 );
stats_issue_event( EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s );
udp_make_connectionid(connid, remoteip, 1);
if (inpacket[0] != connid[0] || inpacket[1] != connid[1]) {
const size_t s = sizeof("Connection ID missmatch.");
outpacket[0] = htonl(3);
outpacket[1] = inpacket[3];
memcpy(&outpacket[2], "Connection ID missmatch.", s);
socket_send6(serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0);
stats_issue_event(EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s);
return 1;
}
}

switch( action ) {
case 0: /* This is a connect action */
/* look for udp bittorrent magic id */
if( (ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980) )
return 1;
switch (action) {
case 0: /* This is a connect action */
/* look for udp bittorrent magic id */
if ((ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980))
return 1;

outpacket[0] = 0;
outpacket[1] = inpacket[3];
outpacket[2] = connid[0];
outpacket[3] = connid[1];
outpacket[0] = 0;
outpacket[1] = inpacket[3];
outpacket[2] = connid[0];
outpacket[3] = connid[1];

socket_send6( serversocket, ws->outbuf, 16, remoteip, remoteport, 0 );
stats_issue_event( EVENT_CONNECT, FLAG_UDP, 16 );
socket_send6(serversocket, ws->outbuf, 16, remoteip, remoteport, 0);
stats_issue_event(EVENT_CONNECT, FLAG_UDP, 16);
break;
case 1: /* This is an announce action */
/* Minimum udp announce packet size */
if (byte_count < 98)
return 1;

/* We do only want to know, if it is zero */
left = inpacket[64 / 4] | inpacket[68 / 4];

event = ntohl(inpacket[80 / 4]);
port = *(uint16_t *)(((char *)inpacket) + 96);
ws->hash = (ot_hash *)(((char *)inpacket) + 16);

OT_SETIP(ws->peer, remoteip);
OT_SETPORT(ws->peer, &port);
OT_PEERFLAG(ws->peer) = 0;

switch (event) {
case 1:
OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
break;
case 1: /* This is an announce action */
/* Minimum udp announce packet size */
if( byte_count < 98 )
return 1;

/* We do only want to know, if it is zero */
left = inpacket[64/4] | inpacket[68/4];

event = ntohl( inpacket[80/4] );
port = *(uint16_t*)( ((char*)inpacket) + 96 );
ws->hash = (ot_hash*)( ((char*)inpacket) + 16 );

OT_SETIP( ws->peer, remoteip );
OT_SETPORT( ws->peer, &port );
OT_PEERFLAG( ws->peer ) = 0;

switch( event ) {
case 1: OT_PEERFLAG( ws->peer ) |= PEER_FLAG_COMPLETED; break;
case 3: OT_PEERFLAG( ws->peer ) |= PEER_FLAG_STOPPED; break;
default: break;
}

if( !left )
OT_PEERFLAG( ws->peer ) |= PEER_FLAG_SEEDING;

outpacket[0] = htonl( 1 ); /* announce action */
outpacket[1] = inpacket[12/4];

if( OT_PEERFLAG( ws->peer ) & PEER_FLAG_STOPPED ) { /* Peer is gone. */
ws->reply = ws->outbuf;
ws->reply_size = remove_peer_from_torrent( FLAG_UDP, ws );
} else {
/* Limit amount of peers to OT_MAX_PEERS_UDP */
uint32_t numwant = ntohl( inpacket[92/4] );
size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6;
if (numwant > max_peers) numwant = max_peers;

ws->reply = ws->outbuf + 8;
ws->reply_size = 8 + add_peer_to_torrent_and_return_peers( FLAG_UDP, ws, numwant );
}

socket_send6( serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0 );
stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size );
case 3:
OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
break;

case 2: /* This is a scrape action */
outpacket[0] = htonl( 2 ); /* scrape action */
outpacket[1] = inpacket[12/4];

for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ )
return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count );

socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 );
stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count );
default:
break;
}

if (!left)
OT_PEERFLAG(ws->peer) |= PEER_FLAG_SEEDING;

outpacket[0] = htonl(1); /* announce action */
outpacket[1] = inpacket[12 / 4];

if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) { /* Peer is gone. */
ws->reply = ws->outbuf;
ws->reply_size = remove_peer_from_torrent(FLAG_UDP, ws);
} else {
/* Limit amount of peers to OT_MAX_PEERS_UDP */
uint32_t numwant = ntohl(inpacket[92 / 4]);
size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6;
if (numwant > max_peers)
numwant = max_peers;

ws->reply = ws->outbuf + 8;
ws->reply_size = 8 + add_peer_to_torrent_and_return_peers(FLAG_UDP, ws, numwant);
}

socket_send6(serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0);
stats_issue_event(EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size);
break;

case 2: /* This is a scrape action */
outpacket[0] = htonl(2); /* scrape action */
outpacket[1] = inpacket[12 / 4];

for (scrape_count = 0; (scrape_count * 20 < byte_count - 16) && (scrape_count <= 74); scrape_count++)
return_udp_scrape_for_torrent(*(ot_hash *)(((char *)inpacket) + 16 + 20 * scrape_count), ((char *)outpacket) + 8 + 12 * scrape_count);

socket_send6(serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0);
stats_issue_event(EVENT_SCRAPE, FLAG_UDP, scrape_count);
break;
}
return 1;
}

static void* udp_worker( void * args ) {
int64 sock = (int64)args;
static void *udp_worker(void *args) {
int64 sock = (int64)args;
struct ot_workstruct ws;
memset( &ws, 0, sizeof(ws) );
memset(&ws, 0, sizeof(ws));

ws.inbuf=malloc(G_INBUF_SIZE);
ws.outbuf=malloc(G_OUTBUF_SIZE);
#ifdef _DEBUG_HTTPERROR
ws.debugbuf=malloc(G_DEBUGBUF_SIZE);
ws.inbuf = malloc(G_INBUF_SIZE);
ws.outbuf = malloc(G_OUTBUF_SIZE);
#ifdef _DEBUG_HTTPERROR
ws.debugbuf = malloc(G_DEBUGBUF_SIZE);
#endif

while( g_opentracker_running )
handle_udp6( sock, &ws );
while (g_opentracker_running)
handle_udp6(sock, &ws);

free( ws.inbuf );
free( ws.outbuf );
#ifdef _DEBUG_HTTPERROR
free( ws.debugbuf );
free(ws.inbuf);
free(ws.outbuf);
#ifdef _DEBUG_HTTPERROR
free(ws.debugbuf);
#endif
return NULL;
}

void udp_init( int64 sock, unsigned int worker_count ) {
void udp_init(int64 sock, unsigned int worker_count) {
pthread_t thread_id;
if( !g_rijndael_round_key[0] )
if (!g_rijndael_round_key[0])
udp_generate_rijndael_round_key();
#ifdef _DEBUG
fprintf( stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock );
fprintf(stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock);
#endif
while( worker_count-- )
pthread_create( &thread_id, NULL, udp_worker, (void *)sock );
while (worker_count--)
pthread_create(&thread_id, NULL, udp_worker, (void *)sock);
}

const char *g_version_udp_c = "$Source$: $Revision$\n";
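The connection-id handling above is stateless: the tracker never stores the ids it hands out, it recomputes them from the sender's address with a key that rotates hourly, and it accepts either the current or the previous hour's key so clients on the boundary are not rejected. A sketch of that validation flow; derive() stands in for the Rijndael construction in udp_make_connectionid() and is an assumption, not opentracker API.

    #include <stdint.h>
    #include <string.h>

    typedef uint8_t ip6_t[16];

    /* Assumed stand-in for udp_make_connectionid(): age 0 = this hour's
       key, age 1 = the previous hour's key. */
    extern void derive(uint32_t out[2], const ip6_t ip, int age);

    static int connid_valid(const uint32_t seen[2], const ip6_t ip) {
      uint32_t want[2];
      int age;
      for (age = 0; age <= 1; ++age) {
        derive(want, ip, age);
        if (!memcmp(seen, want, sizeof(want)))
          return 1; /* id matches one of the two live keys */
      }
      return 0; /* caller answers with an error packet */
    }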
4 ot_udp.h
@@ -6,7 +6,7 @@
#ifndef OT_UDP_H__
#define OT_UDP_H__

void udp_init( int64 sock, unsigned int worker_count );
int handle_udp6( int64 serversocket, struct ot_workstruct *ws );
void udp_init(int64 sock, unsigned int worker_count);
int handle_udp6(int64 serversocket, struct ot_workstruct *ws);

#endif
ot_vector.c (236 changed lines)
@@ -4,43 +4,37 @@
   $id$ */

 /* System */
+#include <stddef.h>
+#include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
 #include <strings.h>
-#include <stdint.h>
-#include <stddef.h>

 /* Opentracker */
 #include "trackerlogic.h"
 #include "ot_vector.h"

 /* Libowfat */
-#include "uint32.h"
 #include "uint16.h"
+#include "uint32.h"

-static int vector_compare_peer6(const void *peer1, const void *peer2 ) {
-  return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE6 );
-}
-static int vector_compare_peer4(const void *peer1, const void *peer2 ) {
-  return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE4 );
-}
+static int vector_compare_peer6(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE6); }
+static int vector_compare_peer4(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE4); }

 /* This function gives us a binary search that returns a pointer, even if
    no exact match is found. In that case it sets exactmatch 0 and gives
    calling functions the chance to insert data
 */
-void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size,
-                     size_t compare_size, int *exactmatch ) {
+void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch) {
   size_t interval = member_count;

-  while( interval ) {
-    uint8_t *lookat = ((uint8_t*)base) + member_size * ( interval / 2 );
-    int cmp = memcmp( lookat, key, compare_size );
-    if(cmp == 0 ) {
+  while (interval) {
+    uint8_t *lookat = ((uint8_t *)base) + member_size * (interval / 2);
+    int cmp = memcmp(lookat, key, compare_size);
+    if (cmp == 0) {
       base = lookat;
       break;
     }
-    if(cmp < 0) {
+    if (cmp < 0) {
       base = lookat + member_size;
       interval--;
     }
@@ -48,13 +42,14 @@ void *binary_search( const void * const key, const void * base, const size_t mem
   }

   *exactmatch = interval;
-  return (void*)base;
+  return (void *)base;
 }
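
Note: a minimal usage sketch (not from the patch) of the convention the comment above describes — on a miss, binary_search still returns the insertion slot and clears exactmatch. The single-byte array is illustrative only; memcmp ordering matches numeric order for single bytes:

  unsigned char sorted[] = {2, 4, 8, 16};
  unsigned char key = 9;
  int exactmatch;
  unsigned char *slot = binary_search(&key, sorted, 4, 1, 1, &exactmatch);
  /* exactmatch == 0; slot points where 9 would be inserted to keep order */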

-static uint8_t vector_hash_peer( ot_peer const *peer, size_t compare_size, int bucket_count ) {
+static uint8_t vector_hash_peer(ot_peer const *peer, size_t compare_size, int bucket_count) {
   unsigned int hash = 5381;
-  uint8_t *p = (uint8_t*)peer;
-  while( compare_size-- ) hash += (hash<<5) + *(p++);
+  uint8_t *p = (uint8_t *)peer;
+  while (compare_size--)
+    hash += (hash << 5) + *(p++);
   return hash % bucket_count;
 }

@@ -65,58 +60,62 @@ static uint8_t vector_hash_peer( ot_peer const *peer, size_t compare_size, int b
    if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert
    took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector.
 */
-void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ) {
-  uint8_t *match = binary_search( key, vector->data, vector->size, member_size, compare_size, exactmatch );
+void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch) {
+  uint8_t *match = binary_search(key, vector->data, vector->size, member_size, compare_size, exactmatch);

-  if( *exactmatch ) return match;
+  if (*exactmatch)
+    return match;

-  if( vector->size + 1 > vector->space ) {
+  if (vector->size + 1 > vector->space) {
     size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
-    uint8_t *new_data = realloc( vector->data, new_space * member_size );
-    if( !new_data ) return NULL;
+    uint8_t *new_data = realloc(vector->data, new_space * member_size);
+    if (!new_data)
+      return NULL;
     /* Adjust pointer if it moved by realloc */
-    match = new_data + (match - (uint8_t*)vector->data);
+    match = new_data + (match - (uint8_t *)vector->data);

-    vector->data = new_data;
+    vector->data = new_data;
     vector->space = new_space;
   }
-  memmove( match + member_size, match, ((uint8_t*)vector->data) + member_size * vector->size - match );
+  memmove(match + member_size, match, ((uint8_t *)vector->data) + member_size * vector->size - match);

   vector->size++;
   return match;
 }
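
Note: the typical call pattern for vector_find_or_insert, sketched (not from the patch; some_vector and hash are hypothetical). A fresh slot holds whatever bytes memmove shifted into it, so the caller must initialize it:

  ot_vector some_vector; /* hypothetical, assumed zeroed elsewhere */
  ot_hash hash;          /* hypothetical key */
  int exactmatch;
  ot_hash *entry = vector_find_or_insert(&some_vector, (void *)hash, sizeof(ot_hash), OT_HASH_COMPARE_SIZE, &exactmatch);
  if (!entry) {
    /* realloc failed; the vector is unchanged */
  } else if (!exactmatch) {
    memcpy(entry, hash, sizeof(ot_hash)); /* new slot: caller fills it in */
  }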

-ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch ) {
-  ot_peer *match, *end;
+ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch) {
+  ot_peer *match, *end;
   const size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
-  size_t match_to_end;
+  size_t match_to_end;

   /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
-  if( vector->space < vector->size )
-    vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, compare_size, vector->size );
-  match = binary_search( peer, vector->data, vector->size, peer_size, compare_size, exactmatch );
+  if (vector->space < vector->size)
+    vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);
+  match = binary_search(peer, vector->data, vector->size, peer_size, compare_size, exactmatch);

-  if( *exactmatch ) return match;
+  if (*exactmatch)
+    return match;

   /* This is the amount of bytes that needs to be pushed backwards by peer_size bytes to make room for new peer */
-  end = (ot_peer*)vector->data + vector->size * peer_size;
+  end = (ot_peer *)vector->data + vector->size * peer_size;
   match_to_end = end - match;

-  if( vector->size + 1 > vector->space ) {
-    ptrdiff_t offset = match - (ot_peer*)vector->data;
-    size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
-    ot_peer *new_data = realloc( vector->data, new_space * peer_size );
+  if (vector->size + 1 > vector->space) {
+    ptrdiff_t offset = match - (ot_peer *)vector->data;
+    size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
+    ot_peer *new_data = realloc(vector->data, new_space * peer_size);

-    if( !new_data ) return NULL;
+    if (!new_data)
+      return NULL;
     /* Adjust pointer if it moved by realloc */
-    match = new_data + offset;
+    match = new_data + offset;

-    vector->data = new_data;
+    vector->data = new_data;
     vector->space = new_space;
   }

   /* Here we're guaranteed to have enough space in vector to move the block of peers after insertion point */
-  memmove( match + peer_size, match, match_to_end);
+  memmove(match + peer_size, match, match_to_end);

   vector->size++;
   return match;
@@ -127,130 +126,134 @@ ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer const *peer, siz
    1 if a non-seeding peer was removed
    2 if a seeding peer was removed
 */
-int vector_remove_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size) {
+int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size) {
   int exactmatch, was_seeder;
-  ot_peer *match, *end;
-  size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
+  ot_peer *match, *end;
+  size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);

-  if( !vector->size ) return 0;
+  if (!vector->size)
+    return 0;

   /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
-  if( vector->space < vector->size )
-    vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, compare_size, vector->size );
+  if (vector->space < vector->size)
+    vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);

-  end = ((ot_peer*)vector->data) + peer_size * vector->size;
-  match = (ot_peer*)binary_search( peer, vector->data, vector->size, peer_size, compare_size, &exactmatch );
-  if( !exactmatch ) return 0;
+  end = ((ot_peer *)vector->data) + peer_size * vector->size;
+  match = (ot_peer *)binary_search(peer, vector->data, vector->size, peer_size, compare_size, &exactmatch);
+  if (!exactmatch)
+    return 0;

-  was_seeder = ( OT_PEERFLAG_D( match, peer_size ) & PEER_FLAG_SEEDING ) ? 2 : 1;
-  memmove( match, match + peer_size, end - match - peer_size );
+  was_seeder = (OT_PEERFLAG_D(match, peer_size) & PEER_FLAG_SEEDING) ? 2 : 1;
+  memmove(match, match + peer_size, end - match - peer_size);

   vector->size--;
-  vector_fixup_peers( vector, peer_size );
+  vector_fixup_peers(vector, peer_size);
   return was_seeder;
 }

-void vector_remove_torrent( ot_vector *vector, ot_torrent *match ) {
-  ot_torrent *end = ((ot_torrent*)vector->data) + vector->size;
+void vector_remove_torrent(ot_vector *vector, ot_torrent *match) {
+  ot_torrent *end = ((ot_torrent *)vector->data) + vector->size;

-  if( !vector->size ) return;
+  if (!vector->size)
+    return;

   /* If this is being called after a unsuccessful malloc() for peer_list
      in add_peer_to_torrent, match->peer_list actually might be NULL */
-  free_peerlist( match->peer_list6 );
-  free_peerlist( match->peer_list4 );
+  free_peerlist(match->peer_list6);
+  free_peerlist(match->peer_list4);

-  memmove( match, match + 1, sizeof(ot_torrent) * ( end - match - 1 ) );
-  if( ( --vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) {
+  memmove(match, match + 1, sizeof(ot_torrent) * (end - match - 1));
+  if ((--vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
     vector->space /= OT_VECTOR_SHRINK_RATIO;
-    vector->data = realloc( vector->data, vector->space * sizeof( ot_torrent ) );
+    vector->data = realloc(vector->data, vector->space * sizeof(ot_torrent));
   }
 }

-void vector_clean_list( ot_vector * vector, int num_buckets ) {
-  while( num_buckets-- )
-    free( vector[num_buckets].data );
-  free( vector );
+void vector_clean_list(ot_vector *vector, int num_buckets) {
+  while (num_buckets--)
+    free(vector[num_buckets].data);
+  free(vector);
   return;
 }

-void vector_redistribute_buckets( ot_peerlist * peer_list, size_t peer_size ) {
-  int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1;
-  ot_vector * bucket_list_new, * bucket_list_old = &peer_list->peers;
-  int (*sort_func)(const void *, const void *) =
-    peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4;
+void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size) {
+  int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1;
+  ot_vector *bucket_list_new, *bucket_list_old = &peer_list->peers;
+  int (*sort_func)(const void *, const void *) = peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4;

-  if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
+  if (OT_PEERLIST_HASBUCKETS(peer_list)) {
     num_buckets_old = peer_list->peers.size;
     bucket_list_old = peer_list->peers.data;
   }

-  if( peer_list->peer_count < 255 )
+  if (peer_list->peer_count < 255)
     num_buckets_new = 1;
-  else if( peer_list->peer_count > 8192 )
+  else if (peer_list->peer_count > 8192)
     num_buckets_new = 64;
-  else if( peer_list->peer_count >= 512 && peer_list->peer_count < 4096 )
+  else if (peer_list->peer_count >= 512 && peer_list->peer_count < 4096)
     num_buckets_new = 16;
-  else if( peer_list->peer_count < 512 && num_buckets_old <= 16 )
+  else if (peer_list->peer_count < 512 && num_buckets_old <= 16)
     num_buckets_new = num_buckets_old;
-  else if( peer_list->peer_count < 512 )
+  else if (peer_list->peer_count < 512)
     num_buckets_new = 1;
-  else if( peer_list->peer_count < 8192 && num_buckets_old > 1 )
+  else if (peer_list->peer_count < 8192 && num_buckets_old > 1)
     num_buckets_new = num_buckets_old;
   else
     num_buckets_new = 16;

-  if( num_buckets_new == num_buckets_old )
+  if (num_buckets_new == num_buckets_old)
     return;

   /* Assume near perfect distribution */
-  bucket_list_new = malloc( num_buckets_new * sizeof( ot_vector ) );
-  if( !bucket_list_new) return;
-  bzero( bucket_list_new, num_buckets_new * sizeof( ot_vector ) );
+  bucket_list_new = malloc(num_buckets_new * sizeof(ot_vector));
+  if (!bucket_list_new)
+    return;
+  bzero(bucket_list_new, num_buckets_new * sizeof(ot_vector));

-  tmp = peer_list->peer_count / num_buckets_new;
+  tmp = peer_list->peer_count / num_buckets_new;
   bucket_size_new = OT_VECTOR_MIN_MEMBERS;
-  while( bucket_size_new < tmp)
+  while (bucket_size_new < tmp)
     bucket_size_new *= OT_VECTOR_GROW_RATIO;

   /* preallocate vectors to hold all peers */
-  for( bucket=0; bucket<num_buckets_new; ++bucket ) {
+  for (bucket = 0; bucket < num_buckets_new; ++bucket) {
     bucket_list_new[bucket].space = bucket_size_new;
-    bucket_list_new[bucket].data = malloc( bucket_size_new * peer_size );
-    if( !bucket_list_new[bucket].data )
-      return vector_clean_list( bucket_list_new, num_buckets_new );
+    bucket_list_new[bucket].data = malloc(bucket_size_new * peer_size);
+    if (!bucket_list_new[bucket].data)
+      return vector_clean_list(bucket_list_new, num_buckets_new);
   }

   /* Now sort them into the correct bucket */
-  for( bucket=0; bucket<num_buckets_old; ++bucket ) {
-    ot_peer * peers_old = bucket_list_old[bucket].data;
-    int peer_count_old = bucket_list_old[bucket].size;
-    while( peer_count_old-- ) {
-      ot_vector * bucket_dest = bucket_list_new;
-      if( num_buckets_new > 1 )
+  for (bucket = 0; bucket < num_buckets_old; ++bucket) {
+    ot_peer *peers_old = bucket_list_old[bucket].data;
+    int peer_count_old = bucket_list_old[bucket].size;
+    while (peer_count_old--) {
+      ot_vector *bucket_dest = bucket_list_new;
+      if (num_buckets_new > 1)
         bucket_dest += vector_hash_peer(peers_old, OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size), num_buckets_new);
-      if( bucket_dest->size + 1 > bucket_dest->space ) {
-        void * tmp = realloc( bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space );
-        if( !tmp ) return vector_clean_list( bucket_list_new, num_buckets_new );
+      if (bucket_dest->size + 1 > bucket_dest->space) {
+        void *tmp = realloc(bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space);
+        if (!tmp)
+          return vector_clean_list(bucket_list_new, num_buckets_new);
         bucket_dest->data = tmp;
         bucket_dest->space *= OT_VECTOR_GROW_RATIO;
       }
-      memcpy((ot_peer*)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size);
+      memcpy((ot_peer *)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size);
       peers_old += peer_size;
     }
   }

   /* Now sort each bucket to later allow bsearch */
-  for( bucket=0; bucket<num_buckets_new; ++bucket )
-    qsort( bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func );
+  for (bucket = 0; bucket < num_buckets_new; ++bucket)
+    qsort(bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func);

   /* Everything worked fine. Now link new bucket_list to peer_list */
-  if( OT_PEERLIST_HASBUCKETS( peer_list) )
-    vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size );
+  if (OT_PEERLIST_HASBUCKETS(peer_list))
+    vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
   else
-    free( peer_list->peers.data );
+    free(peer_list->peers.data);

-  if( num_buckets_new > 1 ) {
+  if (num_buckets_new > 1) {
     peer_list->peers.data = bucket_list_new;
     peer_list->peers.size = num_buckets_new;
     peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */
@@ -258,27 +261,26 @@ void vector_redistribute_buckets( ot_peerlist * peer_list, size_t peer_size ) {
     peer_list->peers.data = bucket_list_new->data;
     peer_list->peers.size = bucket_list_new->size;
     peer_list->peers.space = bucket_list_new->space;
-    free( bucket_list_new );
+    free(bucket_list_new);
   }
 }
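
Note: the bucket-count ladder above, restated as a pure function for readability (a sketch, not from the patch); same thresholds, including the hysteresis that keeps num_buckets_old when rehashing would only thrash:

  static int new_bucket_count(size_t peer_count, int num_buckets_old) {
    if (peer_count < 255) return 1;
    if (peer_count > 8192) return 64;
    if (peer_count >= 512 && peer_count < 4096) return 16;
    if (peer_count < 512 && num_buckets_old <= 16) return num_buckets_old;
    if (peer_count < 512) return 1;
    if (peer_count < 8192 && num_buckets_old > 1) return num_buckets_old;
    return 16;
  }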

-void vector_fixup_peers( ot_vector * vector, size_t peer_size ) {
+void vector_fixup_peers(ot_vector *vector, size_t peer_size) {
   int need_fix = 0;

-  if( !vector->size ) {
-    free( vector->data );
-    vector->data = NULL;
+  if (!vector->size) {
+    free(vector->data);
+    vector->data = NULL;
     vector->space = 0;
     return;
   }

-  while( ( vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) &&
-         ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) {
+  while ((vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
     vector->space /= OT_VECTOR_SHRINK_RATIO;
     need_fix++;
   }
-  if( need_fix )
-    vector->data = realloc( vector->data, vector->space * peer_size );
+  if (need_fix)
+    vector->data = realloc(vector->data, vector->space * peer_size);
 }

 const char *g_version_vector_c = "$Source$: $Revision$\n";

ot_vector.h (23 changed lines)
@@ -16,22 +16,21 @@
 #define OT_PEER_BUCKET_MAXCOUNT 256

 typedef struct {
-  void *data;
-  size_t size;
-  size_t space;
+  void *data;
+  size_t size;
+  size_t space;
 } ot_vector;

-void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size,
-                     size_t compare_size, int *exactmatch );
-void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch );
-ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch );
+void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch);
+void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch);
+ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch);

-int vector_remove_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size);
-void vector_remove_torrent( ot_vector *vector, ot_torrent *match );
+int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size);
+void vector_remove_torrent(ot_vector *vector, ot_torrent *match);

 /* For ot_clean.c */
-void vector_redistribute_buckets( ot_peerlist * peer_list, size_t peer_size );
-void vector_fixup_peers( ot_vector * vector, size_t peer_size );
-void vector_clean_list( ot_vector * vector, int num_buckets);
+void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size);
+void vector_fixup_peers(ot_vector *vector, size_t peer_size);
+void vector_clean_list(ot_vector *vector, int num_buckets);

 #endif

scan_urlencoded_query.c
@@ -45,37 +45,45 @@ static const unsigned char is_unreserved[256] = {

 /* Do a fast nibble to hex representation conversion */
 static unsigned char fromhex(unsigned char x) {
-  x-='0'; if( x<=9) return x;
-  x&=~0x20; x-='A'-'0';
-  if( x<6 ) return x+10;
+  x -= '0';
+  if (x <= 9)
+    return x;
+  x &= ~0x20;
+  x -= 'A' - '0';
+  if (x < 6)
+    return x + 10;
   return 0xff;
 }
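
Note: despite the "nibble to hex" wording in the comment, fromhex decodes one hex digit to its 4-bit value and returns 0xff for anything else; that sentinel is what lets the '%' decoder below reject malformed escapes. Decoding "%41" (a sketch, not from the patch):

  unsigned char hi = fromhex('4'); /* 4 */
  unsigned char lo = fromhex('1'); /* 1 */
  if (hi != 0xff && lo != 0xff) {
    unsigned char c = (unsigned char)(lo | (hi << 4)); /* 0x41 == 'A' */
  }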

 /* Skip the value of a param=value pair */
-void scan_urlencoded_skipvalue( char **string ) {
-  const unsigned char* s=*(const unsigned char**) string;
-  unsigned char f;
+void scan_urlencoded_skipvalue(char **string) {
+  const unsigned char *s = *(const unsigned char **)string;
+  unsigned char f;

   /* Since we are asked to skip the 'value', we assume to stop at
      terminators for a 'value' string position */
-  while( ( f = is_unreserved[ *s++ ] ) & SCAN_SEARCHPATH_VALUE );
+  while ((f = is_unreserved[*s++]) & SCAN_SEARCHPATH_VALUE)
+    ;

   /* If we stopped at a hard terminator like \0 or \n, make the
      next scan_urlencoded_query encounter it again */
-  if( f & SCAN_SEARCHPATH_TERMINATOR ) --s;
+  if (f & SCAN_SEARCHPATH_TERMINATOR)
+    --s;

-  *string = (char*)s;
+  *string = (char *)s;
 }

-int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags) {
-  char *deststring = *string;
-  ssize_t match_length = scan_urlencoded_query(string, deststring, flags );
+int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags) {
+  char *deststring = *string;
+  ssize_t match_length = scan_urlencoded_query(string, deststring, flags);

-  if( match_length < 0 ) return match_length;
-  if( match_length == 0 ) return -3;
+  if (match_length < 0)
+    return match_length;
+  if (match_length == 0)
+    return -3;

-  while( keywords->key ) {
-    if( !strncmp( keywords->key, deststring, match_length ) && !keywords->key[match_length] )
+  while (keywords->key) {
+    if (!strncmp(keywords->key, deststring, match_length) && !keywords->key[match_length])
       return keywords->value;
     keywords++;
   }
@@ -84,59 +92,74 @@ int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCH
 }

 ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) {
-  const unsigned char* s=*(const unsigned char**) string;
-  unsigned char *d = (unsigned char*)deststring;
-  unsigned char b, c;
+  const unsigned char *s = *(const unsigned char **)string;
+  unsigned char *d = (unsigned char *)deststring;
+  unsigned char b, c;

   /* This is the main decoding loop.
      'flag' determines, which characters are non-terminating in current context
      (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path )
   */
-  while( is_unreserved[ c = *s++ ] & flags ) {
+  while (is_unreserved[c = *s++] & flags) {

     /* When encountering an url escaped character, try to decode */
-    if( c=='%') {
-      if( ( b = fromhex(*s++) ) == 0xff ) return -1;
-      if( ( c = fromhex(*s++) ) == 0xff ) return -1;
-      c|=(b<<4);
+    if (c == '%') {
+      if ((b = fromhex(*s++)) == 0xff)
+        return -1;
+      if ((c = fromhex(*s++)) == 0xff)
+        return -1;
+      c |= (b << 4);
     }

     /* Write (possibly decoded) character to output */
     *d++ = c;
   }

-  switch( c ) {
-  case 0: case '\r': case '\n': case ' ':
+  switch (c) {
+  case 0:
+  case '\r':
+  case '\n':
+  case ' ':
     /* If we started scanning on a hard terminator, indicate we've finished */
-    if( d == (unsigned char*)deststring ) return -2;
+    if (d == (unsigned char *)deststring)
+      return -2;

     /* Else make the next call to scan_urlencoded_param encounter it again */
     --s;
     break;
   case '?':
-    if( flags != SCAN_PATH ) return -1;
+    if (flags != SCAN_PATH)
+      return -1;
     break;
   case '=':
-    if( flags != SCAN_SEARCHPATH_PARAM ) return -1;
+    if (flags != SCAN_SEARCHPATH_PARAM)
+      return -1;
     break;
   case '&':
-    if( flags == SCAN_PATH ) return -1;
-    if( flags == SCAN_SEARCHPATH_PARAM ) --s;
+    if (flags == SCAN_PATH)
+      return -1;
+    if (flags == SCAN_SEARCHPATH_PARAM)
+      --s;
     break;
   default:
     return -1;
   }

   *string = (char *)s;
-  return d - (unsigned char*)deststring;
+  return d - (unsigned char *)deststring;
 }

-ssize_t scan_fixed_int( char *data, size_t len, int *tmp ) {
+ssize_t scan_fixed_int(char *data, size_t len, int *tmp) {
   int minus = 0;
-  *tmp = 0;
-  if( *data == '-' ) --len, ++data, ++minus;
-  while( (len > 0) && (*data >= '0') && (*data <= '9') ) { --len; *tmp = 10**tmp + *data++-'0'; }
-  if( minus ) *tmp = -*tmp;
+  *tmp = 0;
+  if (*data == '-')
+    --len, ++data, ++minus;
+  while ((len > 0) && (*data >= '0') && (*data <= '9')) {
+    --len;
+    *tmp = 10 * *tmp + *data++ - '0';
+  }
+  if (minus)
+    *tmp = -*tmp;
   return len;
 }
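
Note: scan_fixed_int consumes digits (with optional leading '-') from the front of a fixed-length buffer and returns the number of bytes it did not parse, so 0 means the whole buffer was numeric. A usage sketch, not from the patch:

  int value;
  char good[] = "6969";
  char trail[] = "80ab";
  ssize_t rest = scan_fixed_int(good, 4, &value);  /* rest == 0, value == 6969 */
  rest = scan_fixed_int(trail, 4, &value);         /* rest == 2, value == 80 */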

scan_urlencoded_query.h
@@ -38,18 +38,18 @@ ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_F
    or -2 for terminator found
    or -3 for no keyword matched
 */
-int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags);
+int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags);

 /* string in: pointer to value of a param=value pair to skip
    out: pointer to next scan position on return
 */
-void scan_urlencoded_skipvalue( char **string );
+void scan_urlencoded_skipvalue(char **string);

 /* data pointer to len chars of string
    len length of chars in data to parse
    number number to receive result
    returns number of bytes not parsed, mostly !=0 means fail
 */
-ssize_t scan_fixed_int( char *data, size_t len, int *number );
+ssize_t scan_fixed_int(char *data, size_t len, int *number);

 #endif

trackerlogic.c (570 changed lines)
@@ -4,119 +4,117 @@
   $id$ */

 /* System */
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
 #include <arpa/inet.h>
-#include <unistd.h>
 #include <errno.h>
 #include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>

 /* Libowfat */
+#include "array.h"
 #include "byte.h"
 #include "io.h"
 #include "iob.h"
 #include "ip6.h"
-#include "array.h"

 /* Opentracker */
-#include "trackerlogic.h"
-#include "ot_vector.h"
+#include "ot_accesslist.h"
+#include "ot_clean.h"
+#include "ot_fullscrape.h"
+#include "ot_http.h"
+#include "ot_livesync.h"
 #include "ot_mutex.h"
 #include "ot_stats.h"
-#include "ot_clean.h"
-#include "ot_http.h"
-#include "ot_accesslist.h"
-#include "ot_fullscrape.h"
-#include "ot_livesync.h"
+#include "ot_vector.h"
+#include "trackerlogic.h"

 /* Forward declaration */
-size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto );
+size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto);

-void free_peerlist( ot_peerlist *peer_list ) {
-  if( peer_list->peers.data ) {
-    if( OT_PEERLIST_HASBUCKETS( peer_list ) )
-      vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size );
+void free_peerlist(ot_peerlist *peer_list) {
+  if (peer_list->peers.data) {
+    if (OT_PEERLIST_HASBUCKETS(peer_list))
+      vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
     else
-      free( peer_list->peers.data );
+      free(peer_list->peers.data);
   }
-  free( peer_list );
+  free(peer_list);
 }

-void add_torrent_from_saved_state( ot_hash const hash, ot_time base, size_t down_count ) {
+void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count) {
   int exactmatch;
   ot_torrent *torrent;
-  ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
+  ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);

-  if( !accesslist_hashisvalid( hash ) )
-    return mutex_bucket_unlock_by_hash( hash, 0 );
+  if (!accesslist_hashisvalid(hash))
+    return mutex_bucket_unlock_by_hash(hash, 0);

-  torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
-  if( !torrent || exactmatch )
-    return mutex_bucket_unlock_by_hash( hash, 0 );
+  torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
+  if (!torrent || exactmatch)
+    return mutex_bucket_unlock_by_hash(hash, 0);

   /* Create a new torrent entry, then */
-  byte_zero( torrent, sizeof( ot_torrent ) );
-  memcpy( torrent->hash, hash, sizeof(ot_hash) );
+  byte_zero(torrent, sizeof(ot_torrent));
+  memcpy(torrent->hash, hash, sizeof(ot_hash));

-  if( !( torrent->peer_list6 = malloc( sizeof (ot_peerlist) ) ) ||
-      !( torrent->peer_list4 = malloc( sizeof (ot_peerlist) ) ) ) {
-    vector_remove_torrent( torrents_list, torrent );
-    return mutex_bucket_unlock_by_hash( hash, 0 );
+  if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
+    vector_remove_torrent(torrents_list, torrent);
+    return mutex_bucket_unlock_by_hash(hash, 0);
   }

-  byte_zero( torrent->peer_list6, sizeof( ot_peerlist ) );
-  byte_zero( torrent->peer_list4, sizeof( ot_peerlist ) );
-  torrent->peer_list6->base = base;
-  torrent->peer_list4->base = base;
+  byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
+  byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
+  torrent->peer_list6->base = base;
+  torrent->peer_list4->base = base;
   torrent->peer_list6->down_count = down_count;
   torrent->peer_list4->down_count = down_count;

-  return mutex_bucket_unlock_by_hash( hash, 1 );
+  return mutex_bucket_unlock_by_hash(hash, 1);
 }

-size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ) {
-  int exactmatch, delta_torrentcount = 0;
-  ot_torrent *torrent;
-  ot_peer *peer_dest;
-  ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash );
-  ot_peerlist *peer_list;
-  size_t peer_size; /* initialized in next line */
+size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount) {
+  int exactmatch, delta_torrentcount = 0;
+  ot_torrent *torrent;
+  ot_peer *peer_dest;
+  ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
+  ot_peerlist *peer_list;
+  size_t peer_size; /* initialized in next line */
   ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);

-  if( !accesslist_hashisvalid( *ws->hash ) ) {
-    mutex_bucket_unlock_by_hash( *ws->hash, 0 );
-    if( proto == FLAG_TCP ) {
+  if (!accesslist_hashisvalid(*ws->hash)) {
+    mutex_bucket_unlock_by_hash(*ws->hash, 0);
+    if (proto == FLAG_TCP) {
       const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e";
-      memcpy( ws->reply, invalid_hash, strlen( invalid_hash ) );
-      return strlen( invalid_hash );
+      memcpy(ws->reply, invalid_hash, strlen(invalid_hash));
+      return strlen(invalid_hash);
     }
     return 0;
   }

-  torrent = vector_find_or_insert( torrents_list, (void*)ws->hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
-  if( !torrent ) {
-    mutex_bucket_unlock_by_hash( *ws->hash, 0 );
+  torrent = vector_find_or_insert(torrents_list, (void *)ws->hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
+  if (!torrent) {
+    mutex_bucket_unlock_by_hash(*ws->hash, 0);
     return 0;
   }

-  if( !exactmatch ) {
+  if (!exactmatch) {
     /* Create a new torrent entry, then */
-    byte_zero( torrent, sizeof(ot_torrent));
-    memcpy( torrent->hash, *ws->hash, sizeof(ot_hash) );
+    byte_zero(torrent, sizeof(ot_torrent));
+    memcpy(torrent->hash, *ws->hash, sizeof(ot_hash));

-    if( !( torrent->peer_list6 = malloc( sizeof (ot_peerlist) ) ) ||
-        !( torrent->peer_list4 = malloc( sizeof (ot_peerlist) ) ) ) {
-      vector_remove_torrent( torrents_list, torrent );
-      mutex_bucket_unlock_by_hash( *ws->hash, 0 );
+    if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
+      vector_remove_torrent(torrents_list, torrent);
+      mutex_bucket_unlock_by_hash(*ws->hash, 0);
       return 0;
     }

-    byte_zero( torrent->peer_list6, sizeof( ot_peerlist ) );
-    byte_zero( torrent->peer_list4, sizeof( ot_peerlist ) );
+    byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
+    byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
     delta_torrentcount = 1;
   } else
-    clean_single_torrent( torrent );
+    clean_single_torrent(torrent);

   torrent->peer_list6->base = g_now_minutes;
   torrent->peer_list4->base = g_now_minutes;
@@ -124,99 +122,99 @@ size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstr
   peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;

   /* Check for peer in torrent */
-  peer_dest = vector_find_or_insert_peer( &(peer_list->peers), peer_src, peer_size, &exactmatch );
-  if( !peer_dest ) {
-    mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount );
+  peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer_src, peer_size, &exactmatch);
+  if (!peer_dest) {
+    mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
     return 0;
   }

   /* Tell peer that it's fresh */
-  OT_PEERTIME( ws->peer, OT_PEER_SIZE6 ) = 0;
+  OT_PEERTIME(ws->peer, OT_PEER_SIZE6) = 0;

   /* Sanitize flags: Whoever claims to have completed download, must be a seeder */
-  if( ( OT_PEERFLAG( ws->peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED )
-    OT_PEERFLAG( ws->peer ) ^= PEER_FLAG_COMPLETED;
+  if ((OT_PEERFLAG(ws->peer) & (PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING)) == PEER_FLAG_COMPLETED)
+    OT_PEERFLAG(ws->peer) ^= PEER_FLAG_COMPLETED;

   /* If we hadn't had a match create peer there */
-  if( !exactmatch ) {
+  if (!exactmatch) {

 #ifdef WANT_SYNC_LIVE
-    if( proto == FLAG_MCA )
-      OT_PEERFLAG( ws->peer ) |= PEER_FLAG_FROM_SYNC;
+    if (proto == FLAG_MCA)
+      OT_PEERFLAG(ws->peer) |= PEER_FLAG_FROM_SYNC;
     else
-      livesync_tell( ws );
+      livesync_tell(ws);
 #endif

     peer_list->peer_count++;
-    if( OT_PEERFLAG( ws->peer ) & PEER_FLAG_COMPLETED ) {
+    if (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED) {
       peer_list->down_count++;
-      stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws );
+      stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
     }
-    if( OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING )
+    if (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)
       peer_list->seed_count++;

   } else {
-    stats_issue_event( EVENT_RENEW, 0, OT_PEERTIME( peer_dest, peer_size ) );
+    stats_issue_event(EVENT_RENEW, 0, OT_PEERTIME(peer_dest, peer_size));
 #ifdef WANT_SPOT_WOODPECKER
-    if( ( OT_PEERTIME(peer_dest, peer_size) > 0 ) && ( OT_PEERTIME(peer_dest, peer_size) < 20 ) )
-      stats_issue_event( EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer );
+    if ((OT_PEERTIME(peer_dest, peer_size) > 0) && (OT_PEERTIME(peer_dest, peer_size) < 20))
+      stats_issue_event(EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer);
 #endif
 #ifdef WANT_SYNC_LIVE
     /* Won't live sync peers that come back too fast. Only exception:
        fresh "completed" reports */
-    if( proto != FLAG_MCA ) {
-      if( OT_PEERTIME( peer_dest, peer_size ) > OT_CLIENT_SYNC_RENEW_BOUNDARY ||
-          ( !(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED ) ) )
-        livesync_tell( ws );
+    if (proto != FLAG_MCA) {
+      if (OT_PEERTIME(peer_dest, peer_size) > OT_CLIENT_SYNC_RENEW_BOUNDARY ||
+          (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)))
        livesync_tell(ws);
     }
 #endif

-    if( (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING ) )
+    if ((OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
       peer_list->seed_count--;
-    if( !(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING ) )
+    if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
       peer_list->seed_count++;
-    if( !(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED ) ) {
+    if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)) {
       peer_list->down_count++;
-      stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws );
+      stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
     }
-    if( OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED )
-      OT_PEERFLAG( ws->peer ) |= PEER_FLAG_COMPLETED;
+    if (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED)
+      OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
   }

-  memcpy( peer_dest, peer_src, peer_size );
+  memcpy(peer_dest, peer_src, peer_size);
 #ifdef WANT_SYNC
-  if( proto == FLAG_MCA ) {
-    mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount );
+  if (proto == FLAG_MCA) {
+    mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
     return 0;
   }
 #endif

-  ws->reply_size = return_peers_for_torrent( ws, torrent, amount, ws->reply, proto );
-  mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount );
+  ws->reply_size = return_peers_for_torrent(ws, torrent, amount, ws->reply, proto);
+  mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
   return ws->reply_size;
 }

-static size_t return_peers_all( ot_peerlist *peer_list, size_t peer_size, char *reply ) {
+static size_t return_peers_all(ot_peerlist *peer_list, size_t peer_size, char *reply) {
   unsigned int bucket, num_buckets = 1;
-  ot_vector * bucket_list = &peer_list->peers;
+  ot_vector *bucket_list = &peer_list->peers;
   size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
-  size_t result = compare_size * peer_list->peer_count;
-  char * r_end = reply + result;
+  size_t result = compare_size * peer_list->peer_count;
+  char *r_end = reply + result;

-  if( OT_PEERLIST_HASBUCKETS(peer_list) ) {
+  if (OT_PEERLIST_HASBUCKETS(peer_list)) {
     num_buckets = bucket_list->size;
     bucket_list = (ot_vector *)bucket_list->data;
   }

-  for( bucket = 0; bucket<num_buckets; ++bucket ) {
-    ot_peer *peers = bucket_list[bucket].data;
+  for (bucket = 0; bucket < num_buckets; ++bucket) {
+    ot_peer *peers = bucket_list[bucket].data;
     size_t peer_count = bucket_list[bucket].size;
-    while( peer_count-- ) {
-      if( OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING ) {
+    while (peer_count--) {
+      if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) {
         r_end -= compare_size;
-        memcpy( r_end, peers, compare_size);
+        memcpy(r_end, peers, compare_size);
       } else {
-        memcpy( reply, peers, compare_size );
+        memcpy(reply, peers, compare_size);
         reply += compare_size;
       }
       peers += peer_size;
@@ -225,45 +223,47 @@ static size_t return_peers_all( ot_peerlist *peer_list, size_t peer_size, char *
   return result;
 }

-static size_t return_peers_selection( struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply ) {
+static size_t return_peers_selection(struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply) {
   unsigned int bucket_offset, bucket_index = 0, num_buckets = 1;
-  ot_vector * bucket_list = &peer_list->peers;
-  unsigned int shifted_pc = peer_list->peer_count;
+  ot_vector *bucket_list = &peer_list->peers;
+  unsigned int shifted_pc = peer_list->peer_count;
   unsigned int shifted_step = 0;
-  unsigned int shift = 0;
+  unsigned int shift = 0;
   size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
-  size_t result = compare_size * amount;
-  char * r_end = reply + result;
+  size_t result = compare_size * amount;
+  char *r_end = reply + result;

-  if( OT_PEERLIST_HASBUCKETS(peer_list) ) {
+  if (OT_PEERLIST_HASBUCKETS(peer_list)) {
     num_buckets = bucket_list->size;
     bucket_list = (ot_vector *)bucket_list->data;
   }

   /* Make fixpoint arithmetic as exact as possible */
-#define MAXPRECBIT (1<<(8*sizeof(int)-3))
-  while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; }
-  shifted_step = shifted_pc/amount;
+#define MAXPRECBIT (1 << (8 * sizeof(int) - 3))
+  while (!(shifted_pc & MAXPRECBIT)) {
+    shifted_pc <<= 1;
+    shift++;
+  }
+  shifted_step = shifted_pc / amount;
 #undef MAXPRECBIT

   /* Initialize somewhere in the middle of peers so that
      fixpoint's aliasing doesn't alway miss the same peers */
   bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count;

-  while( amount-- ) {
+  while (amount--) {
     ot_peer *peer;

     /* This is the aliased, non shifted range, next value may fall into */
-    unsigned int diff = ( ( ( amount + 1 ) * shifted_step ) >> shift ) -
-                        ( ( amount * shifted_step ) >> shift );
-    bucket_offset += 1 + nrand48(ws->rand48_state) % diff;
+    unsigned int diff = (((amount + 1) * shifted_step) >> shift) - ((amount * shifted_step) >> shift);
+    bucket_offset += 1 + nrand48(ws->rand48_state) % diff;

-    while( bucket_offset >= bucket_list[bucket_index].size ) {
+    while (bucket_offset >= bucket_list[bucket_index].size) {
       bucket_offset -= bucket_list[bucket_index].size;
-      bucket_index = ( bucket_index + 1 ) % num_buckets;
+      bucket_index = (bucket_index + 1) % num_buckets;
     }
     peer = bucket_list[bucket_index].data + peer_size * bucket_offset;
-    if( OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING ) {
+    if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) {
       r_end -= compare_size;
       memcpy(r_end, peer, compare_size);
     } else {
@@ -274,51 +274,51 @@ static size_t return_peers_selection( struct ot_workstruct *ws, ot_peerlist *pee
   return result;
 }
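
Note: a worked example of the fixpoint stepping above (numbers mine, not from the patch). With peer_count = 1000 and amount = 3 on a 32-bit int, MAXPRECBIT is 1 << 29, so the loop shifts until shifted_pc = 1000 << 20 and shift = 20; shifted_step = (1000 << 20) / 3. The prefix sums (k * shifted_step) >> shift then land at 333, 666 and 999, partitioning the 1000 peers into three nearly equal stripes, and one peer is drawn at random from each stripe.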

-static size_t return_peers_for_torrent_udp( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply ) {
-  char *r = reply;
-  size_t peer_size = peer_size_from_peer6(&ws->peer);
-  ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
+static size_t return_peers_for_torrent_udp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
+  char *r = reply;
+  size_t peer_size = peer_size_from_peer6(&ws->peer);
+  ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
   size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
   size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;

-  if( amount > peer_list->peer_count )
+  if (amount > peer_list->peer_count)
     amount = peer_list->peer_count;

-  *(uint32_t*)(r+0) = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM );
-  *(uint32_t*)(r+4) = htonl( peer_count - seed_count );
-  *(uint32_t*)(r+8) = htonl( seed_count );
-  r += 12;
+  *(uint32_t *)(r + 0) = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
+  *(uint32_t *)(r + 4) = htonl(peer_count - seed_count);
+  *(uint32_t *)(r + 8) = htonl(seed_count);
+  r += 12;

-  if( amount ) {
-    if( amount == peer_list->peer_count )
-      r += return_peers_all( peer_list, peer_size, r );
+  if (amount) {
+    if (amount == peer_list->peer_count)
+      r += return_peers_all(peer_list, peer_size, r);
     else
-      r += return_peers_selection( ws, peer_list, peer_size, amount, r );
+      r += return_peers_selection(ws, peer_list, peer_size, amount, r);
   }
   return r - reply;
 }

-static size_t return_peers_for_torrent_tcp( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply ) {
-  char *r = reply;
-  int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
-  size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
-  size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
-  size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count;
+static size_t return_peers_for_torrent_tcp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
+  char *r = reply;
+  int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
+  size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
+  size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
+  size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count;

   /* Simple case: amount of peers in both lists is less than requested, here we return all results */
-  size_t amount_v4 = torrent->peer_list4->peer_count;
-  size_t amount_v6 = torrent->peer_list6->peer_count;
+  size_t amount_v4 = torrent->peer_list4->peer_count;
+  size_t amount_v6 = torrent->peer_list6->peer_count;

   /* Complex case: both lists have more than enough entries and we need to split between v4 and v6 clients */
-  if( amount_v4 + amount_v6 > amount ) {
-    size_t amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4;
+  if (amount_v4 + amount_v6 > amount) {
+    size_t amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4;
     const size_t SCALE = 1024;

     /* If possible, fill at least a quarter of peer from each family */
-    if( amount / 4 <= amount_v4 )
-      amount_v4 = amount / 4;
-    if( amount / 4 <= amount_v6 )
-      amount_v6 = amount / 4;
+    if (amount / 4 <= amount_v4)
+      amount_v4 = amount / 4;
+    if (amount / 4 <= amount_v6)
+      amount_v6 = amount / 4;

     /* Fill the rest according to which family's pool provides more peers */
     amount_left = amount - (amount_v4 + amount_v6);
@@ -326,37 +326,38 @@ static size_t return_peers_for_torrent_tcp( struct ot_workstruct * ws, ot_torren
     left_v4 = torrent->peer_list4->peer_count - amount_v4;
     left_v6 = torrent->peer_list6->peer_count - amount_v6;

-    if( left_v4 + left_v6 ) {
-      percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6);
-      percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6);
+    if (left_v4 + left_v6) {
+      percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6);
+      percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6);
     }

-    amount_v4 += (amount_left * percent_v4) / SCALE;
-    amount_v6 += (amount_left * percent_v6) / SCALE;
+    amount_v4 += (amount_left * percent_v4) / SCALE;
+    amount_v6 += (amount_left * percent_v6) / SCALE;

     /* Integer division rounding can leave out a peer */
-    if( amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count )
+    if (amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count)
       ++amount_v6;
-    if( amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count )
+    if (amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count)
       ++amount_v4;
   }

-  r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval/2 );
+  r +=
+      sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval / 2);

-  if( amount_v4 ) {
-    r += sprintf( r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4);
-    if( amount_v4 == torrent->peer_list4->peer_count )
-      r += return_peers_all( torrent->peer_list4, OT_PEER_SIZE4, r );
+  if (amount_v4) {
+    r += sprintf(r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4);
+    if (amount_v4 == torrent->peer_list4->peer_count)
+      r += return_peers_all(torrent->peer_list4, OT_PEER_SIZE4, r);
     else
-      r += return_peers_selection( ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r );
+      r += return_peers_selection(ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r);
   }

-  if( amount_v6 ) {
-    r += sprintf( r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6);
-    if( amount_v6 == torrent->peer_list6->peer_count )
-      r += return_peers_all( torrent->peer_list6, OT_PEER_SIZE6, r );
+  if (amount_v6) {
+    r += sprintf(r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6);
+    if (amount_v6 == torrent->peer_list6->peer_count)
+      r += return_peers_all(torrent->peer_list6, OT_PEER_SIZE6, r);
     else
-      r += return_peers_selection( ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r );
+      r += return_peers_selection(ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r);
   }

   *r++ = 'e';
@@ -365,154 +366,159 @@ static size_t return_peers_for_torrent_tcp( struct ot_workstruct * ws, ot_torren
 }

 /* Compiles a list of random peers for a torrent
- * Reply must have enough space to hold:
- * 92 + 6 * amount bytes for TCP/IPv4
- * 92 + 18 * amount bytes for TCP/IPv6
- * 12 + 6 * amount bytes for UDP/IPv4
- * 12 + 18 * amount bytes for UDP/IPv6
- * Does not yet check not to return self
- */
-size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) {
+ * Reply must have enough space to hold:
+ * 92 + 6 * amount bytes for TCP/IPv4
+ * 92 + 18 * amount bytes for TCP/IPv6
+ * 12 + 6 * amount bytes for UDP/IPv4
+ * 12 + 18 * amount bytes for UDP/IPv6
+ * Does not yet check not to return self
+ */
+size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto) {
   return proto == FLAG_TCP ? return_peers_for_torrent_tcp(ws, torrent, amount, reply) : return_peers_for_torrent_udp(ws, torrent, amount, reply);
 }
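
Note: the buffer budget from the comment above, made concrete (numbers mine, not from the patch). For a TCP announce answering with up to 200 IPv6 peers the caller must provision 92 + 18 * 200 = 3692 bytes: the fixed bencoded dictionary overhead plus 16 address bytes and 2 port bytes per returned peer.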
|
||||
|
||||
/* Fetches scrape info for a specific torrent */
|
||||
size_t return_udp_scrape_for_torrent( ot_hash const hash, char *reply ) {
|
||||
int exactmatch, delta_torrentcount = 0;
|
||||
ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
|
||||
ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
|
||||
size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply) {
|
||||
int exactmatch, delta_torrentcount = 0;
|
||||
ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
|
||||
ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
|
||||
|
||||
if( !exactmatch ) {
|
||||
memset( reply, 0, 12);
|
||||
if (!exactmatch) {
|
||||
memset(reply, 0, 12);
|
||||
} else {
|
||||
uint32_t *r = (uint32_t*) reply;
|
||||
uint32_t *r = (uint32_t *)reply;
|
||||
|
||||
if( clean_single_torrent( torrent ) ) {
|
||||
vector_remove_torrent( torrents_list, torrent );
|
||||
memset( reply, 0, 12);
|
||||
if (clean_single_torrent(torrent)) {
|
||||
vector_remove_torrent(torrents_list, torrent);
|
||||
memset(reply, 0, 12);
|
||||
delta_torrentcount = -1;
|
||||
} else {
|
||||
r[0] = htonl( torrent->peer_list6->seed_count + torrent->peer_list4->seed_count );
|
||||
r[1] = htonl( torrent->peer_list6->down_count + torrent->peer_list4->down_count );
|
||||
r[2] = htonl( torrent->peer_list6->peer_count + torrent->peer_list4->peer_count -
|
||||
torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
|
||||
r[0] = htonl(torrent->peer_list6->seed_count + torrent->peer_list4->seed_count);
|
||||
r[1] = htonl(torrent->peer_list6->down_count + torrent->peer_list4->down_count);
|
||||
r[2] = htonl(torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
|
||||
}
|
||||
}
|
||||
mutex_bucket_unlock_by_hash( hash, delta_torrentcount );
|
||||
mutex_bucket_unlock_by_hash(hash, delta_torrentcount);
|
||||
return 12;
|
||||
}
|
||||
|
||||
/* Fetches scrape info for a specific torrent */
|
||||
size_t return_tcp_scrape_for_torrent( ot_hash const *hash_list, int amount, char *reply ) {
|
||||
size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply) {
|
||||
char *r = reply;
|
||||
int exactmatch, i;
|
||||
|
||||
r += sprintf( r, "d5:filesd" );
|
||||
r += sprintf(r, "d5:filesd");
|
||||
|
||||
for( i=0; i<amount; ++i ) {
|
||||
for (i = 0; i < amount; ++i) {
|
||||
int delta_torrentcount = 0;
|
||||
ot_hash const *hash = hash_list + i;
|
||||
ot_vector *torrents_list = mutex_bucket_lock_by_hash( *hash );
|
||||
ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
|
||||
ot_hash const *hash = hash_list + i;
|
||||
ot_vector *torrents_list = mutex_bucket_lock_by_hash(*hash);
|
||||
ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
|
||||
|
||||
if( exactmatch ) {
|
||||
if( clean_single_torrent( torrent ) ) {
|
||||
vector_remove_torrent( torrents_list, torrent );
|
||||
if (exactmatch) {
|
||||
if (clean_single_torrent(torrent)) {
|
||||
vector_remove_torrent(torrents_list, torrent);
|
||||
delta_torrentcount = -1;
|
||||
} else {
|
||||
*r++='2';*r++='0';*r++=':';
|
||||
memcpy( r, hash, sizeof(ot_hash) ); r+=sizeof(ot_hash);
|
||||
r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee",
|
||||
torrent->peer_list6->seed_count + torrent->peer_list4->seed_count,
|
||||
torrent->peer_list6->down_count + torrent->peer_list4->down_count,
|
||||
torrent->peer_list6->peer_count + torrent->peer_list4->peer_count -
|
||||
torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
|
||||
*r++ = '2';
|
||||
*r++ = '0';
|
||||
*r++ = ':';
|
||||
memcpy(r, hash, sizeof(ot_hash));
|
||||
r += sizeof(ot_hash);
|
||||
r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", torrent->peer_list6->seed_count + torrent->peer_list4->seed_count,
|
||||
torrent->peer_list6->down_count + torrent->peer_list4->down_count,
|
||||
torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
|
||||
}
|
||||
}
|
||||
mutex_bucket_unlock_by_hash( *hash, delta_torrentcount );
|
||||
mutex_bucket_unlock_by_hash(*hash, delta_torrentcount);
|
||||
}
|
||||
|
||||
*r++ = 'e'; *r++ = 'e';
|
||||
*r++ = 'e';
|
||||
*r++ = 'e';
|
||||
return r - reply;
|
||||
}

static ot_peerlist dummy_list;
size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ) {
size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws) {
int exactmatch;
ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash );
ot_torrent *torrent = binary_search( ws->hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
ot_torrent *torrent = binary_search(ws->hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
ot_peerlist *peer_list = &dummy_list;
size_t peer_size; /* initialized in next line */
ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);
ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);
size_t peer_count = 0, seed_count = 0;

#ifdef WANT_SYNC_LIVE
if( proto != FLAG_MCA ) {
OT_PEERFLAG( ws->peer ) |= PEER_FLAG_STOPPED;
livesync_tell( ws );
if (proto != FLAG_MCA) {
OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
livesync_tell(ws);
}
#endif

if( exactmatch ) {
if (exactmatch) {
peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
switch( vector_remove_peer( &peer_list->peers, peer_src, peer_size ) ) {
case 2: peer_list->seed_count--; /* Intentional fallthrough */
case 1: peer_list->peer_count--; /* Intentional fallthrough */
default: break;
switch (vector_remove_peer(&peer_list->peers, peer_src, peer_size)) {
case 2:
peer_list->seed_count--; /* Intentional fallthrough */
case 1:
peer_list->peer_count--; /* Intentional fallthrough */
default:
break;
}

peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
}

if( proto == FLAG_TCP ) {
int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
ws->reply_size = sprintf( ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval, erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4 );
if (proto == FLAG_TCP) {
int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
ws->reply_size = sprintf(ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval,
erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4);
}

/* Handle UDP reply */
if( proto == FLAG_UDP ) {
((uint32_t*)ws->reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM );
((uint32_t*)ws->reply)[3] = htonl( peer_count - seed_count );
((uint32_t*)ws->reply)[4] = htonl( seed_count);
ws->reply_size = 20;
if (proto == FLAG_UDP) {
((uint32_t *)ws->reply)[2] = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
((uint32_t *)ws->reply)[3] = htonl(peer_count - seed_count);
((uint32_t *)ws->reply)[4] = htonl(seed_count);
ws->reply_size = 20;
}

mutex_bucket_unlock_by_hash( *ws->hash, 0 );
mutex_bucket_unlock_by_hash(*ws->hash, 0);
return ws->reply_size;
}
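
The UDP branch above only fills words 2 to 4 of the 20-byte announce reply; the action and transaction id in words 0 and 1 are assumed to be written earlier in the UDP request path. A sketch of the complete layout, following BEP 15 rather than code shown in this diff:

#include <arpa/inet.h>
#include <stdint.h>

/* Assumed layout of the full 20-byte UDP announce reply. */
static void fill_udp_announce_reply(uint8_t *reply, uint32_t transaction_id, uint32_t interval, uint32_t leechers, uint32_t seeders) {
  uint32_t *r32 = (uint32_t *)reply;
  r32[0] = htonl(1);       /* action: announce */
  r32[1] = transaction_id; /* echoed back as received */
  r32[2] = htonl(interval);
  r32[3] = htonl(leechers);
  r32[4] = htonl(seeders); /* 20 bytes total before any peer data */
}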

void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ) {
int bucket;
void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data) {
int bucket;
size_t j;

for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
ot_vector *torrents_list = mutex_bucket_lock( bucket );
ot_torrent *torrents = (ot_torrent*)(torrents_list->data);
for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
ot_vector *torrents_list = mutex_bucket_lock(bucket);
ot_torrent *torrents = (ot_torrent *)(torrents_list->data);

for( j=0; j<torrents_list->size; ++j )
if( for_each( torrents + j, data ) )
for (j = 0; j < torrents_list->size; ++j)
if (for_each(torrents + j, data))
break;

mutex_bucket_unlock( bucket, 0 );
if( !g_opentracker_running ) return;
mutex_bucket_unlock(bucket, 0);
if (!g_opentracker_running)
return;
}
}
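
A hypothetical caller of the iterator above, counting all torrents. Note the scope of the early exit: a non-zero return from the callback only stops scanning the current bucket, while iteration aborts entirely once g_opentracker_running drops to zero:

static int count_cb(ot_torrent *torrent, uintptr_t data) {
  (void)torrent;
  ++*(size_t *)data;
  return 0; /* keep scanning this bucket */
}

static size_t count_torrents(void) {
  size_t count = 0;
  iterate_all_torrents(count_cb, (uintptr_t)&count);
  return count;
}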

ot_peer *peer_from_peer6( ot_peer6 *peer, size_t *peer_size ) {
ot_ip6 *ip = (ot_ip6*)peer;
if( !ip6_isv4mapped(ip) ) {
ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size) {
ot_ip6 *ip = (ot_ip6 *)peer;
if (!ip6_isv4mapped(ip)) {
*peer_size = OT_PEER_SIZE6;
return (ot_peer*)peer;
return (ot_peer *)peer;
}
*peer_size = OT_PEER_SIZE4;
return (ot_peer*)(((uint8_t*)peer) + 12);
return (ot_peer *)(((uint8_t *)peer) + 12);
}

size_t peer_size_from_peer6(ot_peer6 *peer) {
ot_ip6 *ip = (ot_ip6*)peer;
if( !ip6_isv4mapped(ip))
size_t peer_size_from_peer6(ot_peer6 *peer) {
ot_ip6 *ip = (ot_ip6 *)peer;
if (!ip6_isv4mapped(ip))
return OT_PEER_SIZE6;
return OT_PEER_SIZE4;
}
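
ip6_isv4mapped() comes from libowfat. For illustration only: a v4-mapped address is ::ffff:a.b.c.d, that is ten zero bytes, two 0xff bytes, then the four IPv4 bytes, which is why the v4 view of a peer starts 12 bytes into the v6 record above:

#include <string.h>

/* Illustrative stand-in for the check the helpers above rely on. */
static int looks_v4mapped(const unsigned char ip[16]) {
  static const unsigned char prefix[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff};
  return memcmp(ip, prefix, sizeof(prefix)) == 0;
}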

@ -520,20 +526,20 @@ size_t peer_size_from_peer6(ot_peer6 *peer) {
#ifdef _DEBUG_RANDOMTORRENTS
void trackerlogic_add_random_torrents(size_t amount) {
struct ot_workstruct ws;
memset( &ws, 0, sizeof(ws) );
memset(&ws, 0, sizeof(ws));

ws.inbuf=malloc(G_INBUF_SIZE);
ws.outbuf=malloc(G_OUTBUF_SIZE);
ws.reply=ws.outbuf;
ws.hash=(ot_hash*)ws.inbuf;
ws.inbuf = malloc(G_INBUF_SIZE);
ws.outbuf = malloc(G_OUTBUF_SIZE);
ws.reply = ws.outbuf;
ws.hash = (ot_hash *)ws.inbuf;

while( amount-- ) {
while (amount--) {
arc4random_buf(ws.hash, sizeof(ot_hash));
arc4random_buf(&ws.peer, sizeof(ws.peer));

OT_PEERFLAG(ws.peer) &= PEER_FLAG_SEEDING | PEER_FLAG_COMPLETED | PEER_FLAG_STOPPED;

add_peer_to_torrent_and_return_peers( FLAG_TCP, &ws, 1 );
add_peer_to_torrent_and_return_peers(FLAG_TCP, &ws, 1);
}

free(ws.inbuf);
@ -541,54 +547,54 @@ void trackerlogic_add_random_torrents(size_t amount) {
}
#endif

void exerr( char * message ) {
fprintf( stderr, "%s\n", message );
exit( 111 );
void exerr(char *message) {
fprintf(stderr, "%s\n", message);
exit(111);
}

void trackerlogic_init( ) {
void trackerlogic_init() {
g_tracker_id = random();

if( !g_stats_path )
if (!g_stats_path)
g_stats_path = "stats";
g_stats_path_len = strlen( g_stats_path );
g_stats_path_len = strlen(g_stats_path);

/* Initialise background worker threads */
mutex_init( );
clean_init( );
fullscrape_init( );
accesslist_init( );
livesync_init( );
stats_init( );
mutex_init();
clean_init();
fullscrape_init();
accesslist_init();
livesync_init();
stats_init();
}

void trackerlogic_deinit( void ) {
int bucket, delta_torrentcount = 0;
void trackerlogic_deinit(void) {
int bucket, delta_torrentcount = 0;
size_t j;

/* Free all torrents... */
for(bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
ot_vector *torrents_list = mutex_bucket_lock( bucket );
if( torrents_list->size ) {
for( j=0; j<torrents_list->size; ++j ) {
ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j;
free_peerlist( torrent->peer_list6 );
free_peerlist( torrent->peer_list4 );
for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
ot_vector *torrents_list = mutex_bucket_lock(bucket);
if (torrents_list->size) {
for (j = 0; j < torrents_list->size; ++j) {
ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + j;
free_peerlist(torrent->peer_list6);
free_peerlist(torrent->peer_list4);
delta_torrentcount -= 1;
}
free( torrents_list->data );
free(torrents_list->data);
}
mutex_bucket_unlock( bucket, delta_torrentcount );
mutex_bucket_unlock(bucket, delta_torrentcount);
}

/* Deinitialise background worker threads */
stats_deinit( );
livesync_deinit( );
accesslist_deinit( );
fullscrape_deinit( );
clean_deinit( );
stats_deinit();
livesync_deinit();
accesslist_deinit();
fullscrape_deinit();
clean_deinit();
/* Release mutexes */
mutex_deinit( );
mutex_deinit();
}

const char *g_version_trackerlogic_c = "$Source$: $Revision$\n";
153
trackerlogic.h
153
trackerlogic.h
@ -6,11 +6,11 @@
#ifndef OT_TRACKERLOGIC_H__
#define OT_TRACKERLOGIC_H__

#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>

#if defined(__linux__) && defined(WANT_ARC4RANDOM)
#include <bsd/stdlib.h>
@ -22,73 +22,76 @@
typedef uint8_t ot_hash[20];
typedef time_t ot_time;
typedef char ot_ip6[16];
typedef struct { ot_ip6 address; int bits; }
ot_net;
typedef struct {
ot_ip6 address;
int bits;
} ot_net;
/* List of peers should fit in a single UDP packet (around 1200 bytes) */
#define OT_MAX_PEERS_UDP6 66
#define OT_MAX_PEERS_UDP4 200
#define OT_MAX_PEERS_UDP6 66
#define OT_MAX_PEERS_UDP4 200

#define OT_IP_SIZE6 16
#define OT_IP_SIZE4 4
#define OT_PORT_SIZE 2
#define OT_FLAG_SIZE 1
#define OT_TIME_SIZE 1
#define OT_IP_SIZE6 16
#define OT_IP_SIZE4 4
#define OT_PORT_SIZE 2
#define OT_FLAG_SIZE 1
#define OT_TIME_SIZE 1

/* Some tracker behaviour tunable */
#define OT_CLIENT_TIMEOUT 30
#define OT_CLIENT_TIMEOUT 30
#define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10
#define OT_CLIENT_TIMEOUT_SEND (60*15)
#define OT_CLIENT_REQUEST_INTERVAL (60*30)
#define OT_CLIENT_REQUEST_VARIATION (60*6)
#define OT_CLIENT_TIMEOUT_SEND (60 * 15)
#define OT_CLIENT_REQUEST_INTERVAL (60 * 30)
#define OT_CLIENT_REQUEST_VARIATION (60 * 6)

#define OT_TORRENT_TIMEOUT_HOURS 24
#define OT_TORRENT_TIMEOUT (60*OT_TORRENT_TIMEOUT_HOURS)
#define OT_TORRENT_TIMEOUT_HOURS 24
#define OT_TORRENT_TIMEOUT (60 * OT_TORRENT_TIMEOUT_HOURS)

#define OT_CLIENT_REQUEST_INTERVAL_RANDOM ( OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION/2 + (int)( nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION ) )
#define OT_CLIENT_REQUEST_INTERVAL_RANDOM \
(OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION / 2 + (int)(nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION))
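
With the defaults above the macro draws announce intervals uniformly from 1800 - 180 + [0, 359], i.e. 1620 to 1979 seconds, so clients spread their re-announces instead of arriving in lockstep. Spelled out with the literal values, and with ws->rand48_state replaced by an explicit parameter for the sake of the sketch:

#include <stdlib.h>

static int client_request_interval_random(unsigned short rand48_state[3]) {
  return (60 * 30) - (60 * 6) / 2 + (int)(nrand48(rand48_state) % (60 * 6));
}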

/* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not
fullscrape more frequently than this amount in seconds */
#define OT_MODEST_PEER_TIMEOUT (60*5)
#define OT_MODEST_PEER_TIMEOUT (60 * 5)

/* If peers come back before 10 minutes, don't live sync them */
#define OT_CLIENT_SYNC_RENEW_BOUNDARY 10

/* Number of tracker admin ip addresses allowed */
#define OT_ADMINIP_MAX 64
#define OT_MAX_THREADS 64
#define OT_ADMINIP_MAX 64
#define OT_MAX_THREADS 64

/* Number of minutes after announce before peer is removed */
#define OT_PEER_TIMEOUT 45
#define OT_PEER_TIMEOUT 45

/* We maintain a list of 1024 pointers to sorted list of ot_torrent structs
Sort key is, of course, its hash */
#define OT_BUCKET_COUNT_BITS 10
#define OT_BUCKET_COUNT_BITS 10

#define OT_BUCKET_COUNT (1<<OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT_SHIFT (32-OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT (1 << OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS)
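
The bucket for a given info-hash is derived from its leading bits; a sketch of the mapping these constants imply (the actual helper lives in the bucket-locking code, so treat this as an assumption about its shape):

#include <stdint.h>

static uint32_t bucket_from_hash(const uint8_t hash[20]) {
  uint32_t leading = ((uint32_t)hash[0] << 24) | ((uint32_t)hash[1] << 16) | ((uint32_t)hash[2] << 8) | (uint32_t)hash[3];
  return leading >> OT_BUCKET_COUNT_SHIFT; /* top 10 bits select one of 1024 buckets */
}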

/* if _DEBUG_RANDOMTORRENTS is set, this is the amount of torrents to create
on startup */
#define RANDOMTORRENTS (1024*1024*1)
#define RANDOMTORRENTS (1024 * 1024 * 1)

/* From opentracker.c */
extern time_t g_now_seconds;
extern time_t g_now_seconds;
extern volatile int g_opentracker_running;
#define g_now_minutes (g_now_seconds/60)
#define g_now_minutes (g_now_seconds / 60)

extern uint32_t g_tracker_id;
typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG;

#define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6)+(OT_PORT_SIZE))
#define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4)+(OT_PORT_SIZE))
#define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE)-(OT_TIME_SIZE)-(OT_FLAG_SIZE))
#define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6) + (OT_PORT_SIZE))
#define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4) + (OT_PORT_SIZE))
#define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE) - (OT_TIME_SIZE) - (OT_FLAG_SIZE))

#define OT_PEER_SIZE6 ((OT_TIME_SIZE)+(OT_FLAG_SIZE)+(OT_PEER_COMPARE_SIZE6))
#define OT_PEER_SIZE4 ((OT_TIME_SIZE)+(OT_FLAG_SIZE)+(OT_PEER_COMPARE_SIZE4))
#define OT_PEER_SIZE6 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE6))
#define OT_PEER_SIZE4 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE4))

typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */
typedef uint8_t ot_peer6[OT_PEER_SIZE6];
typedef uint8_t ot_peer4[OT_PEER_SIZE4];
typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */
typedef uint8_t ot_peer6[OT_PEER_SIZE6];
typedef uint8_t ot_peer4[OT_PEER_SIZE4];
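
Taken together, these sizes describe a flat byte layout rather than a C struct: address, then port, then one flag byte, then one base-time byte, i.e. 16+2+1+1 = 20 bytes for a v6 peer and 4+2+1+1 = 8 bytes for a v4 peer. Hypothetical accessors spelling that out for the v4 case (names invented for the sketch):

#include <stdint.h>

static uint16_t peer4_port_be(const uint8_t peer[8]) { return (uint16_t)((peer[4] << 8) | peer[5]); } /* assumed network byte order */
static uint8_t  peer4_flags(const uint8_t peer[8]) { return peer[6]; } /* same byte as OT_PEERFLAG_D(peer, 8) */
static uint8_t  peer4_time(const uint8_t peer[8]) { return peer[7]; }  /* same byte as OT_PEERTIME(peer, 8) */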
static const uint8_t PEER_FLAG_SEEDING = 0x80;
static const uint8_t PEER_FLAG_COMPLETED = 0x40;
static const uint8_t PEER_FLAG_STOPPED = 0x20;
@ -96,20 +99,20 @@ static const uint8_t PEER_FLAG_FROM_SYNC = 0x10;
static const uint8_t PEER_FLAG_LEECHING = 0x00;

/* Takes an ot_peer6 and returns the proper pointer to the peer and sets peer_size */
ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size);
size_t peer_size_from_peer6(ot_peer6 *peer);
ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size);
size_t peer_size_from_peer6(ot_peer6 *peer);

/* New style */
#define OT_SETIP(peer,ip) memcpy((uint8_t*)(peer),(ip),OT_IP_SIZE6)
#define OT_SETPORT(peer,port) memcpy(((uint8_t*)(peer))+(OT_IP_SIZE6),(port),2)
#define OT_PEERFLAG(peer) (((uint8_t*)(peer))[(OT_IP_SIZE6)+2])
#define OT_PEERFLAG_D(peer,peersize) (((uint8_t*)(peer))[(peersize)-2])
#define OT_PEERTIME(peer,peersize) (((uint8_t*)(peer))[(peersize)-1])
#define OT_SETIP(peer, ip) memcpy((uint8_t *)(peer), (ip), OT_IP_SIZE6)
#define OT_SETPORT(peer, port) memcpy(((uint8_t *)(peer)) + (OT_IP_SIZE6), (port), 2)
#define OT_PEERFLAG(peer) (((uint8_t *)(peer))[(OT_IP_SIZE6) + 2])
#define OT_PEERFLAG_D(peer, peersize) (((uint8_t *)(peer))[(peersize) - 2])
#define OT_PEERTIME(peer, peersize) (((uint8_t *)(peer))[(peersize) - 1])
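
A sketch of filling a fresh v6-width peer record with the macros above; ip and port_be are placeholders, and the port is assumed to arrive already in network byte order as copied from the wire:

static void fill_peer_example(ot_peer6 *peer, const ot_ip6 *ip, const uint8_t port_be[2]) {
  OT_SETIP(peer, ip);        /* copies the 16 address bytes */
  OT_SETPORT(peer, port_be); /* copies the 2 port bytes */
  OT_PEERFLAG(peer) = PEER_FLAG_LEECHING;
}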

#define PEERS_BENCODED6 "6:peers6"
#define PEERS_BENCODED4 "5:peers"
#define PEERS_BENCODED6 "6:peers6"
#define PEERS_BENCODED4 "5:peers"

#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash))
#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash))

struct ot_peerlist;
typedef struct ot_peerlist ot_peerlist;
@ -122,26 +125,26 @@ typedef struct {
#include "ot_vector.h"

struct ot_peerlist {
ot_time base;
size_t seed_count;
size_t peer_count;
size_t down_count;
/* normal peers vector or
pointer to ot_vector[32] buckets if data != NULL and space == 0
*/
ot_vector peers;
ot_time base;
size_t seed_count;
size_t peer_count;
size_t down_count;
/* normal peers vector or
pointer to ot_vector[32] buckets if data != NULL and space == 0
*/
ot_vector peers;
};
#define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space)
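
The macro works because a flat ot_vector always keeps size <= space, while the bucket conversion described in the struct comment above leaves space at 0 with data pointing at an ot_vector[32] array; size > space therefore uniquely marks bucket mode. A sketch of a reader honouring that invariant:

static ot_vector *peerlist_buckets(ot_peerlist *peer_list) {
  if (OT_PEERLIST_HASBUCKETS(peer_list))
    return (ot_vector *)peer_list->peers.data; /* the ot_vector[32] bucket array */
  return NULL; /* flat mode: use &peer_list->peers directly */
}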

struct ot_workstruct {
/* Thread specific, static */
char *inbuf;
#define G_INBUF_SIZE 8192
char *outbuf;
#define G_OUTBUF_SIZE 8192
#ifdef _DEBUG_HTTPERROR
char *debugbuf;
#define G_DEBUGBUF_SIZE 8192
char *inbuf;
#define G_INBUF_SIZE 8192
char *outbuf;
#define G_OUTBUF_SIZE 8192
#ifdef _DEBUG_HTTPERROR
char *debugbuf;
#define G_DEBUGBUF_SIZE 8192
#endif

/* The peer currently in the working */
@ -174,34 +177,34 @@ struct ot_workstruct {
#endif

#ifdef WANT_SYNC
#define WANT_SYNC_PARAM( param ) , param
#define WANT_SYNC_PARAM(param) , param
#else
#define WANT_SYNC_PARAM( param )
#define WANT_SYNC_PARAM(param)
#endif

#ifdef WANT_LOG_NETWORKS
#error Live logging networks disabled at the moment.
#endif

void trackerlogic_init( void );
void trackerlogic_deinit( void );
void exerr( char * message );
void trackerlogic_init(void);
void trackerlogic_deinit(void);
void exerr(char *message);

/* add_peer_to_torrent does only release the torrent bucket if from_sync is set,
otherwise it is released in return_peers_for_torrent */
size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount );
size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws );
size_t return_tcp_scrape_for_torrent( ot_hash const *hash_list, int amount, char *reply );
size_t return_udp_scrape_for_torrent( ot_hash const hash, char *reply );
void add_torrent_from_saved_state( ot_hash const hash, ot_time base, size_t down_count );
size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount);
size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws);
size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply);
size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply);
void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count);
#ifdef _DEBUG_RANDOMTORRENTS
void trackerlogic_add_random_torrents(size_t amount);
void trackerlogic_add_random_torrents(size_t amount);
#endif

/* torrent iterator */
void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data );
void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data);

/* Helper, before it moves to its own object */
void free_peerlist( ot_peerlist *peer_list );
void free_peerlist(ot_peerlist *peer_list);

#endif