Full scrape, kinda tested

dynamic-accesslists
erdgeist 18 years ago
parent 67689057d4
commit 5f7c044b54
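A "full scrape" is a GET /scrape request that carries no info_hash parameter: instead of statistics for one torrent, the tracker answers with statistics for every torrent it tracks. After this commit such a request is served by the new return_fullscrape_for_tracker()/sendmallocdata() pair added below. Roughly, the two request shapes look like this (illustrative values, not part of the commit):

    GET /scrape HTTP/1.0                              -> full scrape
    GET /scrape?info_hash=%12%34...%EF HTTP/1.0       -> single-torrent scrape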

@@ -71,6 +71,29 @@ int header_complete(struct http_data* r) {
return 0;
}
void sendmallocdata( int64 s, struct http_data *h, char * buffer, size_t size ) {
  tai6464 t;
  char *header;
  size_t header_size;

  if( !h ) { free( buffer ); return; }
  array_reset( &h->r );

  header = malloc( SUCCESS_HTTP_HEADER_LENGTH );
  if( !header ) { free( buffer ); return; }

  /* %zu is the conversion for a size_t (%zd would be for ssize_t) */
  header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zu\r\n\r\n", size );

  iob_reset( &h->batch );
  iob_addbuf_free( &h->batch, header, header_size );
  iob_addbuf_free( &h->batch, buffer, size );

  // writeable sockets just have a tcp timeout
  taia_uint( &t, 0 ); io_timeout( s, t );
  io_dontwantread( s );
  io_wantwrite( s );
}
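A note on the ownership contract above (implied by the diff rather than stated in it): both header and buffer must be heap allocations, because libowfat's iob_addbuf_free() takes ownership and frees them once the batch has been written out. A minimal caller sketch, mirroring the scrape dispatch further down:

    char  *reply;
    size_t reply_size = return_fullscrape_for_tracker( &reply );
    if( reply_size )
      sendmallocdata( s, h, reply, reply_size );  /* batch machinery frees reply later */
    /* reply_size == 0 means the allocation failed; there is nothing to free */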
/* whoever sends data is not interested in its input-array */
void senddata( int64 s, struct http_data* h, char *buffer, size_t size ) {
  size_t written_size;
@@ -84,7 +107,6 @@ void senddata(int64 s, struct http_data* h, char *buffer, size_t size ) {
#endif
    free( h ); io_close( s );
  } else {
    /* here we would take a copy of the buffer and remember it */
    char * outbuf = malloc( size - written_size );
    tai6464 t;
@@ -103,6 +125,8 @@ void senddata(int64 s, struct http_data* h, char *buffer, size_t size ) {
    // writeable sockets just have a tcp timeout
    taia_uint( &t, 0 ); io_timeout( s, t );
    io_dontwantread( s );
    io_wantwrite( s );
  }
}
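The part of senddata elided between the two hunks above is where the comment's promise is kept: the bytes write() did not accept are copied into outbuf and queued on the batch. A sketch of that pattern, under the same API assumptions as the code above:

    /* Queue the unwritten tail; outbuf is freed by the iob machinery later */
    if( !outbuf ) { free( h ); io_close( s ); return; }
    memmove( outbuf, buffer + written_size, size - written_size );
    iob_addbuf_free( &h->batch, outbuf, size - written_size );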
@@ -221,13 +245,20 @@ e400_param:
        }
      }

      /* Scanned whole query string, wo */
      if( !hash )
        return httperror( s, h, "400 Invalid Request", "This server only serves specific scrapes." );

      /* Scanned whole query string, no hash means full scrape... you might want to limit that */
      if( !hash ) {
        char * reply;
        reply_size = return_fullscrape_for_tracker( &reply );
        if( reply_size )
          return sendmallocdata( s, h, reply, reply_size );

      /* Enough for http header + whole scrape string */
      if( ( reply_size = return_scrape_for_torrent( hash, SUCCESS_HTTP_HEADER_LENGTH + static_scratch ) ) <= 0 )
        goto e500;
      } else {
        /* Enough for http header + whole scrape string */
        if( ( reply_size = return_scrape_for_torrent( hash, SUCCESS_HTTP_HEADER_LENGTH + static_scratch ) ) <= 0 )
          goto e500;
      }
      break;
    case 8:
      if( byte_diff( data, 8, "announce" ) )

@@ -290,6 +290,42 @@ size_t return_peers_for_torrent( ot_torrent *torrent, unsigned int amount, char *reply ) {
  return r - reply;
}
/* Fetch full scrape info for all torrents */
size_t return_fullscrape_for_tracker( char **reply ) {
  int torrent_count = 0, i, j, k;
  char *r;
  time_t time_now = NOW;

  /* First pass: count torrents to size the reply buffer */
  for( i=0; i<256; ++i ) {
    ot_vector *torrents_list = &all_torrents[i];
    torrent_count += torrents_list->size;
  }
  r = *reply = malloc( 128 * torrent_count );
  if( !r ) return 0;

  memmove( r, "d5:filesd", 9 ); r += 9;
  for( i=0; i<256; ++i ) {
    ot_vector *torrents_list = &all_torrents[i];
    for( j=0; j<torrents_list->size; ++j ) {
      ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list;
      ot_hash     *hash      = &( ((ot_torrent*)(torrents_list->data))[j] ).hash;
      int peers = 0, seeds = 0;

      clean_peerlist( time_now, peer_list );
      for( k=0; k<OT_POOLS_COUNT; ++k ) {
        peers += peer_list->peers[k].size;
        seeds += peer_list->seed_count[k];
      }

      /* One entry per torrent: 20-byte hash as key, stats dictionary as value.
         The second trailing 'e' closes the per-torrent dictionary. */
      memmove( r, "20:", 3 ); r += 3;
      memmove( r, hash, 20 ); r += 20;
      r += sprintf( r, "d8:completei%de10:downloadedi%de10:incompletei%dee", seeds, peer_list->downloaded, peers-seeds );
    }
  }
  *r++ = 'e'; *r++ = 'e';

  return r - *reply;
}
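Two remarks on the function above. First, the sizing: each torrent entry costs 3 + 20 bytes for the length-prefixed hash plus at most about 77 bytes for its stats dictionary (three keys, three signed 32-bit values, two closing 'e's), roughly 100 bytes in total, so 128 bytes per torrent leaves comfortable slack for the 11 bytes of global framing ("d5:filesd" plus the final "ee"), provided at least one torrent exists; with torrent_count == 0 this becomes malloc(0) followed by 11 bytes written, so a guard for an empty tracker would be sensible hardening. Second, the wire format: for a single torrent with, say, 17 seeders, 42 completed downloads and 5 leechers (made-up numbers), the reply reads

    d5:filesd20:<20 raw hash bytes>d8:completei17e10:downloadedi42e10:incompletei5eeee

where the run of trailing 'e's closes, in order, the incomplete integer, the per-torrent dictionary, the files dictionary, and the outer dictionary.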
/* Fetches scrape info for a specific torrent */
size_t return_scrape_for_torrent( ot_hash *hash, char *reply ) {
  char *r = reply;

@@ -83,6 +83,7 @@ enum { STATS_MRTG, STATS_TOP5 };
ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer );
size_t return_peers_for_torrent( ot_torrent *torrent, unsigned int amount, char *reply );
size_t return_fullscrape_for_tracker( char **reply );
size_t return_scrape_for_torrent( ot_hash *hash, char *reply );
size_t return_stats_for_tracker( char *reply, int mode );
void remove_peer_from_torrent( ot_hash *hash, ot_peer *peer );
