version 1.4, 2005/03/06 22:17:33
|
version 1.12, 2005/03/08 00:28:48
|
|
|
return p - buf; | return p - buf; |
} | } |
| |
/*
 * Incrementally push data from buf through non-blocking socket s.
 *
 * id         - client slot index (< MAX_CLIENTS); selects the per-client cursor
 * first_time - non-zero resets the cursor to the start of buf for a new transfer
 * s          - connected non-blocking socket
 * buf        - data to send; buflen is its total length in bytes
 * sendlen    - maximum number of bytes to attempt per call
 *
 * Returns the total number of bytes sent so far for this client.
 * NOTE(review): state is kept in a static per-id array, so this helper is
 * not thread-safe and assumes one transfer per id at a time.
 */
static int do_nonblocking_send ( int id, int first_time, SOCKET s, char *buf, int buflen, int sendlen )
{
    char *last = buf + buflen;      /* one past the end of the buffer */
    static char *p[MAX_CLIENTS];    /* per-client send cursor, persists across calls */
    int n;

    if ( first_time ) {
        p[id] = buf;                /* begin a fresh transfer */
    }

    n = send ( s, p[id], min ( sendlen, last - p[id] ), 0 );

    /* send() returns SOCKET_ERROR (-1) on failure -- common on a
     * non-blocking socket (WSAEWOULDBLOCK).  The original code added -1
     * to the cursor, moving it backwards and corrupting the transfer;
     * only advance on success. */
    if ( n != SOCKET_ERROR )
        p[id] += n;

    return p[id] - buf;
}
|
|
|
/*
 * Incrementally pull data from non-blocking socket s into buf.
 *
 * id         - client slot index (< MAX_CLIENTS); selects the per-client cursor
 * first_time - non-zero resets the cursor to the start of buf for a new transfer
 * s          - connected non-blocking socket
 * buf        - destination buffer; buflen is its total capacity in bytes
 * recvlen    - maximum number of bytes to attempt per call
 *
 * Returns the total number of bytes received so far for this client.
 * NOTE(review): state is kept in a static per-id array, so this helper is
 * not thread-safe and assumes one transfer per id at a time.
 */
static int do_nonblocking_recv ( int id, int first_time, SOCKET s, char *buf, int buflen, int recvlen )
{
    char *last = buf + buflen;      /* one past the end of the buffer */
    static char *p[MAX_CLIENTS];    /* per-client receive cursor, persists across calls */
    int n;

    if ( first_time ) {
        p[id] = buf;                /* begin a fresh transfer */
    }

    n = recv ( s, p[id], min ( recvlen, last - p[id] ), 0 );

    /* recv() returns SOCKET_ERROR (-1) on failure -- common on a
     * non-blocking socket (WSAEWOULDBLOCK).  The original code added -1
     * to the cursor, moving it backwards and clobbering a received byte;
     * only advance on success. */
    if ( n != SOCKET_ERROR )
        p[id] += n;

    return p[id] - buf;
}
|
|
|
/* | /* |
* Call this routine right after thread startup. | * Call this routine right after thread startup. |
* SO_OPENTYPE must by 0, regardless what the server did. | * SO_OPENTYPE must by 0, regardless what the server did. |
|
|
test_params *gen = par->general; | test_params *gen = par->general; |
server_memory *mem; | server_memory *mem; |
int n_expected = gen->n_chunks * gen->chunk_size, tmp, i, | int n_expected = gen->n_chunks * gen->chunk_size, tmp, i, |
id = GetCurrentThreadId(), n_connections = 0; |
id = GetCurrentThreadId(), n_connections = 0, n_sent, n_recvd; |
char *p; | char *p; |
struct timeval zerotime = {0,0}; | struct timeval zerotime = {0,0}; |
fd_set fds_recv, fds_send, fds_openrecv, fds_opensend; | fd_set fds_recv, fds_send, fds_openrecv, fds_opensend; |
|
|
FD_ZERO ( &fds_recv ); | FD_ZERO ( &fds_recv ); |
FD_ZERO ( &fds_send ); | FD_ZERO ( &fds_send ); |
FD_ZERO ( &fds_opensend ); | FD_ZERO ( &fds_opensend ); |
|
|
FD_SET ( mem->s, &fds_openrecv ); | FD_SET ( mem->s, &fds_openrecv ); |
| |
while(1) | while(1) |
|
|
fds_recv = fds_openrecv; | fds_recv = fds_openrecv; |
fds_send = fds_opensend; | fds_send = fds_opensend; |
| |
select ( 0, &fds_recv, &fds_send, NULL, &zerotime ); |
wsa_ok ( select ( 0, &fds_recv, &fds_send, NULL, &zerotime ), SOCKET_ERROR !=, |
|
"select_server (%lx): select() failed: %d\n" ); |
| |
/* check for incoming requests */ | /* check for incoming requests */ |
if ( FD_ISSET ( mem->s, &fds_recv ) ) { | if ( FD_ISSET ( mem->s, &fds_recv ) ) { |
|
|
/* accept a single connection */ | /* accept a single connection */ |
tmp = sizeof ( mem->sock[i].peer ); | tmp = sizeof ( mem->sock[i].peer ); |
mem->sock[i].s = accept ( mem->s, (struct sockaddr*) &mem->sock[i].peer, &tmp ); | mem->sock[i].s = accept ( mem->s, (struct sockaddr*) &mem->sock[i].peer, &tmp ); |
wsa_ok ( mem->sock[i].s, INVALID_SOCKET !=, "select_server (%lx): accept failed: %d\n" ); |
wsa_ok ( mem->sock[i].s, INVALID_SOCKET !=, "select_server (%lx): accept() failed: %d\n" ); |
| |
ok ( mem->sock[i].peer.sin_addr.s_addr == inet_addr ( gen->inet_addr ), | ok ( mem->sock[i].peer.sin_addr.s_addr == inet_addr ( gen->inet_addr ), |
"select_server (%x): strange peer address\n", id ); | "select_server (%x): strange peer address\n", id ); |
|
|
| |
for ( i = 0; i < n_connections; i++ ) | for ( i = 0; i < n_connections; i++ ) |
{ | { |
if ( (mem->sock[i].n_recvd < n_expected) |
if ( ( mem->sock[i].n_recvd < n_expected ) && FD_ISSET( mem->sock[i].s, &fds_recv ) ) { |
&& FD_ISSET( mem->sock[i].s, &fds_recv ) ) { |
|
| |
/* Receive data & check it */ | /* Receive data & check it */ |
mem->sock[i].n_recvd = do_nonblocking_recv ( i, (mem->sock[i].n_recvd == 0), mem->sock[i].s, mem->sock[i].buf, n_expected, par->buflen ); |
n_recvd = recv ( mem->sock[i].s, mem->sock[i].buf + mem->sock[i].n_recvd, min ( n_expected - mem->sock[i].n_recvd, par->buflen ), 0 ); |
|
ok ( n_recvd != SOCKET_ERROR, "select_server (%x): error in recv(): %d\n", id, WSAGetLastError() ); |
|
mem->sock[i].n_recvd += n_recvd; |
| |
if ( mem->sock[i].n_recvd == n_expected ) { | if ( mem->sock[i].n_recvd == n_expected ) { |
p = test_buffer ( mem->sock[i].buf, gen->chunk_size, gen->n_chunks ); | p = test_buffer ( mem->sock[i].buf, gen->chunk_size, gen->n_chunks ); |
|
|
FD_CLR ( mem->sock[i].s, &fds_openrecv ); | FD_CLR ( mem->sock[i].s, &fds_openrecv ); |
} | } |
| |
} else if ( !FD_ISSET ( mem->sock[i].s, &fds_openrecv ) |
ok ( mem->sock[i].n_recvd <= n_expected, "select_server (%x): received too many bytes: %d\n", id, mem->sock[i].n_recvd ); |
&& FD_ISSET ( mem->sock[i].s, &fds_send ) ) { |
} |
|
|
|
if ( ( mem->sock[i].n_sent < n_expected ) && FD_ISSET ( mem->sock[i].s, &fds_send ) ) { |
|
|
/* Echo data back */ | /* Echo data back */ |
mem->sock[i].n_sent = do_nonblocking_send ( i, (mem->sock[i].n_sent == 0), mem->sock[i].s, mem->sock[i].buf, n_expected, par->buflen ); |
n_sent = send ( mem->sock[i].s, mem->sock[i].buf + mem->sock[i].n_sent, min ( n_expected - mem->sock[i].n_sent, par->buflen ), 0 ); |
|
ok ( n_sent != SOCKET_ERROR, "select_server (%x): error in send(): %d\n", id, WSAGetLastError() ); |
|
mem->sock[i].n_sent += n_sent; |
| |
if ( mem->sock[i].n_sent == n_expected ) { | if ( mem->sock[i].n_sent == n_expected ) { |
FD_CLR ( mem->sock[i].s, &fds_opensend ); | FD_CLR ( mem->sock[i].s, &fds_opensend ); |
} | } |
} else if( FD_ISSET( mem->sock[i].s, &fds_recv ) ) { |
|
ok ( 0, "select_server (%x): too many bytes read\n", id ); |
ok ( mem->sock[i].n_sent <= n_expected, "select_server (%x): sent too many bytes: %d\n", id, mem->sock[i].n_sent ); |
} | } |
} | } |
| |
/* check if all clients are done */ | /* check if all clients are done */ |
if ( ( fds_opensend.fd_count == 0 ) | if ( ( fds_opensend.fd_count == 0 ) |
|
&& ( fds_openrecv.fd_count == 1 ) /* initial socket that accepts clients */ |
&& ( n_connections == min ( gen->n_clients, MAX_CLIENTS ) ) ) { | && ( n_connections == min ( gen->n_clients, MAX_CLIENTS ) ) ) { |
break; | break; |
} | } |
|
|
128 | 128 |
} | } |
}, | }, |
/* Test 2: event-driven client, non-blocking server via select() */ |
/* Test 2: synchronous client, non-blocking server via select() */ |
{ | { |
{ | { |
STD_STREAM_SOCKET, | STD_STREAM_SOCKET, |