sock.c 33.3 KB
Newer Older
1 2 3
/*
 * Server-side socket management
 *
4
 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
5
 *
6 7 8 9 10 11 12 13 14 15 16 17
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19
 *
20 21 22 23
 * FIXME: we use read|write access in all cases. Shouldn't we depend that
 * on the access of the current handle?
 */

24 25
#include "config.h"

26 27
#include <assert.h>
#include <fcntl.h>
28
#include <stdarg.h>
29 30 31 32 33 34
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/types.h>
35 36 37
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
Steven Edwards's avatar
Steven Edwards committed
38
#ifdef HAVE_SYS_IOCTL_H
39
#include <sys/ioctl.h>
Steven Edwards's avatar
Steven Edwards committed
40
#endif
41 42 43
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
44 45 46
#include <time.h>
#include <unistd.h>

47 48
#include "ntstatus.h"
#define WIN32_NO_STATUS
49
#include "windef.h"
50
#include "winternl.h"
51
#include "winerror.h"
52

53
#include "process.h"
54
#include "file.h"
55 56 57
#include "handle.h"
#include "thread.h"
#include "request.h"
58
#include "user.h"
59

60 61 62 63 64 65 66 67 68 69 70
/* From winsock.h */
#define FD_MAX_EVENTS              10
#define FD_READ_BIT                0
#define FD_WRITE_BIT               1
#define FD_OOB_BIT                 2
#define FD_ACCEPT_BIT              3
#define FD_CONNECT_BIT             4
#define FD_CLOSE_BIT               5

/*
 * Define flags to be used with the WSAAsyncSelect() call.
 */
#define FD_READ                    0x00000001
#define FD_WRITE                   0x00000002
#define FD_OOB                     0x00000004
#define FD_ACCEPT                  0x00000008
#define FD_CONNECT                 0x00000010
#define FD_CLOSE                   0x00000020

/* internal per-socket flags (kept in the high bits so they never clash
 * with the FD_* event bits above) */
#define FD_WINE_LISTENING          0x10000000
#define FD_WINE_NONBLOCKING        0x20000000
#define FD_WINE_CONNECTED          0x40000000
#define FD_WINE_RAW                0x80000000
#define FD_WINE_INTERNAL           0xFFFF0000

/* Constants for WSAIoctl() */
#define WSA_FLAG_OVERLAPPED        0x01

89 90 91
/* server-side representation of a winsock socket */
struct sock
{
    struct object       obj;         /* object header */
    struct fd          *fd;          /* socket file descriptor */
    unsigned int        state;       /* status bits (FD_* / FD_WINE_* flags) */
    unsigned int        mask;        /* event mask requested by the app */
    unsigned int        hmask;       /* held (blocked) events */
    unsigned int        pmask;       /* pending events */
    unsigned int        flags;       /* socket flags (e.g. WSA_FLAG_OVERLAPPED) */
    int                 polling;     /* is socket being polled? */
    unsigned short      type;        /* socket type (SOCK_STREAM etc.) */
    unsigned short      family;      /* socket family */
    struct event       *event;       /* event object */
    user_handle_t       window;      /* window to send the message to */
    unsigned int        message;     /* message to send */
    obj_handle_t        wparam;      /* message wparam (socket handle) */
    int                 errors[FD_MAX_EVENTS]; /* event errors, indexed by FD_*_BIT */
    struct sock        *deferred;    /* socket that waits for a deferred accept */
    struct async_queue *read_q;      /* queue for asynchronous reads */
    struct async_queue *write_q;     /* queue for asynchronous writes */
};

/* object callbacks */
static void sock_dump( struct object *obj, int verbose );
static int sock_signaled( struct object *obj, struct thread *thread );
static struct fd *sock_get_fd( struct object *obj );
static void sock_destroy( struct object *obj );

/* fd callbacks */
static int sock_get_poll_events( struct fd *fd );
static void sock_poll_event( struct fd *fd, int event );
static enum server_fd_type sock_get_fd_type( struct fd *fd );
static void sock_queue_async( struct fd *fd, const async_data_t *data, int type, int count );
static void sock_reselect_async( struct fd *fd, struct async_queue *queue );
static void sock_cancel_async( struct fd *fd, struct process *process, struct thread *thread, client_ptr_t iosb );

/* error translation helpers */
static int sock_get_ntstatus( int err );
static int sock_get_error( int err );
static void sock_set_error(void);

/* object operations for sockets */
static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    sock_dump,                    /* dump */
    no_get_type,                  /* get_type */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    no_satisfied,                 /* satisfied */
    no_signal,                    /* signal */
    sock_get_fd,                  /* get_fd */
    default_fd_map_access,        /* map_access */
    default_get_sd,               /* get_sd */
    default_set_sd,               /* set_sd */
    no_lookup_name,               /* lookup_name */
    no_open_file,                 /* open_file */
    fd_close_handle,              /* close_handle */
    sock_destroy                  /* destroy */
};

/* fd operations for sockets */
static const struct fd_ops sock_fd_ops =
{
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    no_flush,                     /* flush */
    sock_get_fd_type,             /* get_fd_type */
    default_fd_ioctl,             /* ioctl */
    sock_queue_async,             /* queue_async */
    sock_reselect_async,          /* reselect_async */
    sock_cancel_async             /* cancel_async */
};

159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177

/* Permutation of 0..FD_MAX_EVENTS - 1 representing the order in which
 * we post messages if there are multiple events.  Used to send
 * messages.  The problem is if there is both a FD_CONNECT event and,
 * say, an FD_READ event available on the same socket, we want to
 * notify the app of the connect event first.  Otherwise it may
 * discard the read event because it thinks it hasn't connected yet.
 */
static const int event_bitorder[FD_MAX_EVENTS] =
{
    FD_CONNECT_BIT,
    FD_ACCEPT_BIT,
    FD_OOB_BIT,
    FD_WRITE_BIT,
    FD_READ_BIT,
    FD_CLOSE_BIT,
    6, 7, 8, 9  /* leftovers (bits with no assigned event) */
};

178 179 180 181 182 183 184 185 186 187 188
/* Flags that make sense only for SOCK_STREAM sockets */
#define STREAM_FLAG_MASK ((unsigned int) (FD_CONNECT | FD_ACCEPT | FD_WINE_LISTENING | FD_WINE_CONNECTED))

/* How the platform reports a peer's write-side shutdown on a stream socket */
typedef enum {
    SOCK_SHUTDOWN_ERROR = -1,   /* probe failed */
    SOCK_SHUTDOWN_EOF = 0,      /* POLLIN + 0-byte read */
    SOCK_SHUTDOWN_POLLHUP = 1   /* POLLHUP is reported */
} sock_shutdown_t;

static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR;

/* Probe how this system reports shutdown() of one end of a connected
 * stream socket to the peer: some report POLLHUP, others only report
 * POLLIN with read() returning 0 (EOF).
 * Returns SOCK_SHUTDOWN_ERROR if the probe itself fails. */
static sock_shutdown_t sock_check_pollhup(void)
{
    sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR;
    int fd[2], n;
    struct pollfd pfd;
    char dummy;

    /* on failure fd[] is not initialized, so we must not fall through to
     * the close() calls below (the old code closed two garbage fds here) */
    if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) return ret;
    if ( shutdown( fd[0], 1 ) ) goto out;  /* shut down the write side of fd[0] */

    pfd.fd = fd[1];
    pfd.events = POLLIN;
    pfd.revents = 0;

    /* Solaris' poll() sometimes returns nothing if given a 0ms timeout here */
    n = poll( &pfd, 1, 1 );
    if ( n != 1 ) goto out; /* error or timeout */
    if ( pfd.revents & POLLHUP )
        ret = SOCK_SHUTDOWN_POLLHUP;
    else if ( pfd.revents & POLLIN &&
              read( fd[1], &dummy, 1 ) == 0 )
        ret = SOCK_SHUTDOWN_EOF;

out:
    close( fd[0] );
    close( fd[1] );
    return ret;
}

void sock_init(void)
{
220
    sock_shutdown_type = sock_check_pollhup();
221 222 223 224

    switch ( sock_shutdown_type )
    {
    case SOCK_SHUTDOWN_EOF:
225
        if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" );
226 227
        break;
    case SOCK_SHUTDOWN_POLLHUP:
228
        if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" );
229 230
        break;
    default:
231
        fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" );
232 233 234
        sock_shutdown_type = SOCK_SHUTDOWN_EOF;
    }
}
235

236
static int sock_reselect( struct sock *sock )
237
{
238
    int ev = sock_get_poll_events( sock->fd );
239

240
    if (debug_level)
241
        fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev);
242

243 244
    if (!sock->polling)  /* FIXME: should find a better way to do this */
    {
245
        /* previously unconnected socket, is this reselect supposed to connect it? */
246
        if (!(sock->state & ~FD_WINE_NONBLOCKING)) return 0;
247
        /* ok, it is, attach it to the wineserver's main poll loop */
248
        sock->polling = 1;
249
        allow_fd_caching( sock->fd );
250 251
    }
    /* update condition mask */
252
    set_fd_events( sock->fd, ev );
253 254 255
    return ev;
}

256
/* wake anybody waiting on the socket event or send the associated message */
257
static void sock_wake_up( struct sock *sock )
258 259 260
{
    unsigned int events = sock->pmask & sock->mask;
    int i;
261

262
    if ( !events ) return;
263

264 265 266 267 268 269 270
    if (sock->event)
    {
        if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
        set_event( sock->event );
    }
    if (sock->window)
    {
271
        if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window );
272 273 274 275 276
        for (i = 0; i < FD_MAX_EVENTS; i++)
        {
            int event = event_bitorder[i];
            if (sock->pmask & (1 << event))
            {
277
                lparam_t lparam = (1 << event) | (sock_get_error(sock->errors[event]) << 16);
278
                post_message( sock->window, sock->message, sock->wparam, lparam );
279 280 281 282 283
            }
        }
        sock->pmask = 0;
        sock_reselect( sock );
    }
284 285
}

286
/* fetch the pending error on the socket via SO_ERROR */
static inline int sock_error( struct fd *fd )
{
    unsigned int err = 0;
    unsigned int len = sizeof(err);

    getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *)&err, &len );
    return err;
}

295
static int sock_dispatch_asyncs( struct sock *sock, int event, int error )
296 297 298
{
    if ( sock->flags & WSA_FLAG_OVERLAPPED )
    {
299
        if ( event & (POLLIN|POLLPRI) && async_waiting( sock->read_q ) )
300 301 302
        {
            if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
            async_wake_up( sock->read_q, STATUS_ALERTED );
303
            event &= ~(POLLIN|POLLPRI);
304
        }
305
        if ( event & POLLOUT && async_waiting( sock->write_q ) )
306 307 308
        {
            if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
            async_wake_up( sock->write_q, STATUS_ALERTED );
309
            event &= ~POLLOUT;
310
        }
311 312
        if ( event & (POLLERR|POLLHUP) )
        {
313 314
            int status = sock_get_ntstatus( error );

315
            if ( !(sock->state & FD_READ) )
316
                async_wake_up( sock->read_q, status );
317
            if ( !(sock->state & FD_WRITE) )
318
                async_wake_up( sock->write_q, status );
319
        }
320
    }
321
    return event;
322 323
}

324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371
/* translate remaining poll events into pending FD_* notifications and
 * deliver them (the socket's state *before* this poll round decides how
 * the events are interpreted) */
static void sock_dispatch_events( struct sock *sock, int prevstate, int event, int error )
{
    if (prevstate & FD_CONNECT)
    {
        /* a pending connect finished (successfully or not) */
        sock->pmask |= FD_CONNECT;
        sock->hmask |= FD_CONNECT;
        sock->errors[FD_CONNECT_BIT] = error;
    }
    else if (prevstate & FD_WINE_LISTENING)
    {
        /* activity on a listening socket means an incoming connection */
        sock->pmask |= FD_ACCEPT;
        sock->hmask |= FD_ACCEPT;
        sock->errors[FD_ACCEPT_BIT] = error;
    }
    else
    {
        if (event & POLLIN)
        {
            sock->pmask |= FD_READ;
            sock->hmask |= FD_READ;
            sock->errors[FD_READ_BIT] = 0;
        }
        if (event & POLLOUT)
        {
            sock->pmask |= FD_WRITE;
            sock->hmask |= FD_WRITE;
            sock->errors[FD_WRITE_BIT] = 0;
        }
        if (event & POLLPRI)
        {
            sock->pmask |= FD_OOB;
            sock->hmask |= FD_OOB;
            sock->errors[FD_OOB_BIT] = 0;
        }
        if (event & (POLLERR|POLLHUP))
        {
            sock->pmask |= FD_CLOSE;
            sock->hmask |= FD_CLOSE;
            sock->errors[FD_CLOSE_BIT] = error;
        }
    }
    sock_wake_up( sock );
}

372
/* main-loop callback: a poll event occurred on a socket fd.
 * Updates the socket state machine, routes events to asyncs, then to
 * window-message/event notifications, and finally reselects the fd. */
static void sock_poll_event( struct fd *fd, int event )
{
    struct sock *sock = get_fd_user( fd );
    int hangup_seen = 0;
    int prevstate = sock->state;  /* state before this event, used by sock_dispatch_events */
    int error = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %p select event: %x\n", sock, event);

    /* we may change event later, remove from loop here */
    if (event & (POLLERR|POLLHUP)) set_fd_events( sock->fd, -1 );

    if (sock->state & FD_CONNECT)
    {
        if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            event &= ~POLLOUT;
            error = sock_error( fd );
        }
        else if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
        }
    }
    else if (sock->state & FD_WINE_LISTENING)
    {
        /* listening */
        if (event & (POLLERR|POLLHUP))
            error = sock_error( fd );
    }
    else
    {
        /* normal data flow */
        if ( sock->type == SOCK_STREAM && ( event & POLLIN ) )
        {
            char dummy;
            int nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr  = recv( get_unix_fd( fd ), &dummy, 1, MSG_PEEK );
            if ( nr == 0 )
            {
                /* 0-byte peek: the peer closed its end */
                hangup_seen = 1;
                event &= ~POLLIN;
            }
            else if ( nr < 0 )
            {
                event &= ~POLLIN;
                /* EAGAIN can happen if an async recv() falls between the server's poll()
                   call and the invocation of this routine */
                if ( errno != EAGAIN )
                {
                    error = errno;
                    event |= POLLERR;
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %p: %d\n", sock, errno );
                }
            }
        }

        if ( (hangup_seen || event & (POLLHUP|POLLERR)) && (sock->state & (FD_READ|FD_WRITE)) )
        {
            error = error ? error : sock_error( fd );
            /* a POLLHUP alone only kills the write side on EOF-reporting platforms */
            if ( (event & POLLERR) || ( sock_shutdown_type == SOCK_SHUTDOWN_EOF && (event & POLLHUP) ))
                sock->state &= ~FD_WRITE;
            sock->state &= ~FD_READ;

            if (debug_level)
                fprintf(stderr, "socket %p aborted by error %d, event: %x\n", sock, error, event);
        }

        if (hangup_seen)
            event |= POLLHUP;  /* promote the detected EOF to a hangup event */
    }

    event = sock_dispatch_asyncs( sock, event, error );
    sock_dispatch_events( sock, prevstate, event, error );

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );

    sock_reselect( sock );
}

static void sock_dump( struct object *obj, int verbose )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
468
    printf( "Socket fd=%p, state=%x, mask=%x, pending=%x, held=%x\n",
469
            sock->fd, sock->state,
470
            sock->mask, sock->pmask, sock->hmask );
471 472
}

473
static int sock_signaled( struct object *obj, struct thread *thread )
474
{
475
    struct sock *sock = (struct sock *)obj;
476 477
    assert( obj->ops == &sock_ops );

478
    return check_fd_events( sock->fd, sock_get_poll_events( sock->fd ) ) != 0;
479 480
}

481
/* compute which poll events should currently be selected for this socket */
static int sock_get_poll_events( struct fd *fd )
{
    struct sock *sock = get_fd_user( fd );
    unsigned int mask = sock->mask & ~sock->hmask;  /* events the app asked for and that are not held */
    unsigned int smask = sock->state & mask;
    int ev = 0;

    assert( sock->obj.ops == &sock_ops );

    if (sock->state & FD_CONNECT)
        /* connecting, wait for writable */
        return POLLOUT;

    /* read direction: a queued async takes priority over FD_READ/FD_ACCEPT reporting */
    if ( async_queued( sock->read_q ) )
    {
        if ( async_waiting( sock->read_q ) ) ev |= POLLIN | POLLPRI;
    }
    else if (smask & FD_READ || (sock->state & FD_WINE_LISTENING && mask & FD_ACCEPT))
        ev |= POLLIN | POLLPRI;
    /* We use POLLIN with 0 bytes recv() as FD_CLOSE indication for stream sockets. */
    else if ( sock->type == SOCK_STREAM && sock->state & FD_READ && mask & FD_CLOSE &&
              !(sock->hmask & FD_READ) )
        ev |= POLLIN;

    /* write direction: same priority rule */
    if ( async_queued( sock->write_q ) )
    {
        if ( async_waiting( sock->write_q ) ) ev |= POLLOUT;
    }
    else if (smask & FD_WRITE)
        ev |= POLLOUT;

    return ev;
}

515
static enum server_fd_type sock_get_fd_type( struct fd *fd )
516
{
517
    return FD_TYPE_SOCKET;
518 519
}

520
/* queue an asynchronous read or write request on the socket */
static void sock_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
{
    struct sock *sock = get_fd_user( fd );
    struct async *async;
    struct async_queue *queue;

    assert( sock->obj.ops == &sock_ops );

    switch (type)
    {
    case ASYNC_TYPE_READ:
        /* queues are created lazily on first use */
        if (!sock->read_q && !(sock->read_q = create_async_queue( sock->fd ))) return;
        queue = sock->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        if (!sock->write_q && !(sock->write_q = create_async_queue( sock->fd ))) return;
        queue = sock->write_q;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* refuse requests on sockets in a state where they could never complete */
    if ( ( !( sock->state & (FD_READ|FD_CONNECT|FD_WINE_LISTENING) ) && type == ASYNC_TYPE_READ  ) ||
         ( !( sock->state & (FD_WRITE|FD_CONNECT) ) && type == ASYNC_TYPE_WRITE ) )
    {
        set_error( STATUS_PIPE_DISCONNECTED );
        return;
    }

    if (!(async = create_async( current, queue, data ))) return;
    release_object( async );  /* the queue keeps its own reference */

    sock_reselect( sock );

    set_error( STATUS_PENDING );
}

558 559 560
/* an async queue changed: recompute the poll mask */
static void sock_reselect_async( struct fd *fd, struct async_queue *queue )
{
    sock_reselect( get_fd_user( fd ) );
}

564
/* cancel matching asyncs on both queues; fail if a specific iosb matched nothing */
static void sock_cancel_async( struct fd *fd, struct process *process, struct thread *thread, client_ptr_t iosb )
{
    struct sock *sock = get_fd_user( fd );
    int woken;

    assert( sock->obj.ops == &sock_ops );

    woken  = async_wake_up_by( sock->read_q, process, thread, iosb, STATUS_CANCELLED );
    woken += async_wake_up_by( sock->write_q, process, thread, iosb, STATUS_CANCELLED );
    if (!woken && iosb) set_error( STATUS_NOT_FOUND );
}

576 577 578 579 580 581
/* return a new reference to the socket's fd */
static struct fd *sock_get_fd( struct object *obj )
{
    return (struct fd *)grab_object( ((struct sock *)obj)->fd );
}

582 583 584 585 586 587
/* release all resources held by a socket object */
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    free_async_queue( sock->read_q );
    free_async_queue( sock->write_q );
    if (sock->event) release_object( sock->event );
    if (sock->fd)
    {
        /* shut the socket down to force pending poll() calls in the client to return */
        shutdown( get_unix_fd(sock->fd), SHUT_RDWR );
        release_object( sock->fd );
    }
}

603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622
/* reset all socket members (except the object header and fd) to a clean state */
static void init_sock(struct sock *sock)
{
    sock->state = sock->mask = sock->hmask = sock->pmask = 0;
    sock->polling  = 0;
    sock->flags    = 0;
    sock->type     = 0;
    sock->family   = 0;
    sock->event    = NULL;
    sock->window   = 0;
    sock->message  = 0;
    sock->wparam   = 0;
    sock->deferred = NULL;
    sock->read_q   = NULL;
    sock->write_q  = NULL;
    memset( sock->errors, 0, sizeof(sock->errors) );
}

623
/* create a new and unconnected socket */
624
static struct object *create_socket( int family, int type, int protocol, unsigned int flags )
625 626
{
    struct sock *sock;
627
    int sockfd;
628

629 630 631
    sockfd = socket( family, type, protocol );
    if (debug_level)
        fprintf(stderr,"socket(%d,%d,%d)=%d\n",family,type,protocol,sockfd);
632 633
    if (sockfd == -1)
    {
634 635 636 637
        sock_set_error();
        return NULL;
    }
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
638 639 640 641 642
    if (!(sock = alloc_object( &sock_ops )))
    {
        close( sockfd );
        return NULL;
    }
643 644 645 646 647 648
    init_sock( sock );
    sock->state  = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
    sock->flags  = flags;
    sock->type   = type;
    sock->family = family;

649 650
    if (!(sock->fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj,
                            (flags & WSA_FLAG_OVERLAPPED) ? 0 : FILE_SYNCHRONOUS_IO_NONALERT )))
651 652 653 654
    {
        release_object( sock );
        return NULL;
    }
655 656
    sock_reselect( sock );
    clear_error();
657 658 659
    return &sock->obj;
}

660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681
/* Try to accept(2) a connection on the socket and make the resulting fd
 * nonblocking.  We cannot be sure the socket is actually connected or that
 * accept() is even valid on it; in those cases accept() fails and we return
 * -1 with the wineserver error set from errno. */
static int accept_new_fd( struct sock *sock )
{
    struct sockaddr saddr;
    unsigned int slen = sizeof(saddr);
    int acceptfd = accept( get_unix_fd(sock->fd), &saddr, &slen );

    if (acceptfd == -1)
    {
        sock_set_error();
        return -1;
    }
    fcntl( acceptfd, F_SETFL, O_NONBLOCK ); /* make socket nonblocking */
    return acceptfd;
}

682
/* accept a socket (creates a new fd) */
static struct sock *accept_socket( obj_handle_t handle )
{
    struct sock *acceptsock;
    struct sock *sock;
    int	acceptfd;

    sock = (struct sock *)get_handle_obj( current->process, handle, FILE_READ_DATA, &sock_ops );
    if (!sock)
        return NULL;

    if ( sock->deferred )
    {
        /* a previously deferred accept is pending: hand that socket out */
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
        {
            release_object( sock );
            return NULL;
        }
        if (!(acceptsock = alloc_object( &sock_ops )))
        {
            close( acceptfd );
            release_object( sock );
            return NULL;
        }

        init_sock( acceptsock );
        /* newly created socket gets the same properties of the listening socket */
        acceptsock->state  = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
        if (sock->state & FD_WINE_NONBLOCKING)
            acceptsock->state |= FD_WINE_NONBLOCKING;
        acceptsock->mask    = sock->mask;
        acceptsock->type    = sock->type;
        acceptsock->family  = sock->family;
        acceptsock->window  = sock->window;
        acceptsock->message = sock->message;
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags = sock->flags;
        if (!(acceptsock->fd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                                    get_fd_options( sock->fd ) )))
        {
            release_object( acceptsock );
            release_object( sock );
            return NULL;
        }
    }
    clear_error();
    /* the pending accept has been consumed; allow new FD_ACCEPT notifications */
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return acceptsock;
}

740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776
/* accept a connection on 'sock' directly into the existing socket object
 * 'acceptsock', replacing its unix fd; returns TRUE on success */
static int accept_into_socket( struct sock *sock, struct sock *acceptsock )
{
    int acceptfd;
    struct fd *newfd;
    if ( sock->deferred )
    {
        /* reuse the fd of the previously deferred accept */
        newfd = dup_fd_object( sock->deferred->fd, 0, 0,
                               get_fd_options( acceptsock->fd ) );
        if ( !newfd )
            return FALSE;

        set_fd_user( newfd, &sock_fd_ops, &acceptsock->obj );

        release_object( sock->deferred );
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
            return FALSE;

        if (!(newfd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                            get_fd_options( acceptsock->fd ) )))
        {
            close( acceptfd );
            return FALSE;
        }
    }

    /* reset the target socket to a freshly-connected state */
    acceptsock->state  |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
    acceptsock->hmask   = 0;
    acceptsock->pmask   = 0;
    acceptsock->polling = 0;
    acceptsock->type    = sock->type;
    acceptsock->family  = sock->family;
    acceptsock->wparam  = 0;
    acceptsock->deferred = NULL;
    release_object( acceptsock->fd );  /* drop the old fd before installing the new one */
    acceptsock->fd = newfd;

    clear_error();
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );

    return TRUE;
}

788
/* map a unix errno value to a winsock (WSA*) error code;
 * unknown values are reported and mapped to WSAEFAULT */
static int sock_get_error( int err )
{
    switch (err)
    {
        case EINTR:             return WSAEINTR;
        case EBADF:             return WSAEBADF;
        case EPERM:
        case EACCES:            return WSAEACCES;
        case EFAULT:            return WSAEFAULT;
        case EINVAL:            return WSAEINVAL;
        case EMFILE:            return WSAEMFILE;
        case EWOULDBLOCK:       return WSAEWOULDBLOCK;
        case EINPROGRESS:       return WSAEINPROGRESS;
        case EALREADY:          return WSAEALREADY;
        case ENOTSOCK:          return WSAENOTSOCK;
        case EDESTADDRREQ:      return WSAEDESTADDRREQ;
        case EMSGSIZE:          return WSAEMSGSIZE;
        case EPROTOTYPE:        return WSAEPROTOTYPE;
        case ENOPROTOOPT:       return WSAENOPROTOOPT;
        case EPROTONOSUPPORT:   return WSAEPROTONOSUPPORT;
        case ESOCKTNOSUPPORT:   return WSAESOCKTNOSUPPORT;
        case EOPNOTSUPP:        return WSAEOPNOTSUPP;
        case EPFNOSUPPORT:      return WSAEPFNOSUPPORT;
        case EAFNOSUPPORT:      return WSAEAFNOSUPPORT;
        case EADDRINUSE:        return WSAEADDRINUSE;
        case EADDRNOTAVAIL:     return WSAEADDRNOTAVAIL;
        case ENETDOWN:          return WSAENETDOWN;
        case ENETUNREACH:       return WSAENETUNREACH;
        case ENETRESET:         return WSAENETRESET;
        case ECONNABORTED:      return WSAECONNABORTED;
        case EPIPE:
        case ECONNRESET:        return WSAECONNRESET;
        case ENOBUFS:           return WSAENOBUFS;
        case EISCONN:           return WSAEISCONN;
        case ENOTCONN:          return WSAENOTCONN;
        case ESHUTDOWN:         return WSAESHUTDOWN;
        case ETOOMANYREFS:      return WSAETOOMANYREFS;
        case ETIMEDOUT:         return WSAETIMEDOUT;
        case ECONNREFUSED:      return WSAECONNREFUSED;
        case ELOOP:             return WSAELOOP;
        case ENAMETOOLONG:      return WSAENAMETOOLONG;
        case EHOSTDOWN:         return WSAEHOSTDOWN;
        case EHOSTUNREACH:      return WSAEHOSTUNREACH;
        case ENOTEMPTY:         return WSAENOTEMPTY;
#ifdef EPROCLIM
        case EPROCLIM:          return WSAEPROCLIM;
#endif
#ifdef EUSERS
        case EUSERS:            return WSAEUSERS;
#endif
#ifdef EDQUOT
        case EDQUOT:            return WSAEDQUOT;
#endif
#ifdef ESTALE
        case ESTALE:            return WSAESTALE;
#endif
#ifdef EREMOTE
        case EREMOTE:           return WSAEREMOTE;
#endif

        case 0:                 return 0;
        default:
            errno = err;
            perror("wineserver: sock_get_error() can't map error");
            return WSAEFAULT;
    }
}

857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888
/* map a unix errno value to an NTSTATUS code;
 * unknown values are reported and mapped to STATUS_UNSUCCESSFUL */
static int sock_get_ntstatus( int err )
{
    switch ( err )
    {
        case EBADF:             return STATUS_INVALID_HANDLE;
        case EBUSY:             return STATUS_DEVICE_BUSY;
        case EPERM:
        case EACCES:            return STATUS_ACCESS_DENIED;
        case EFAULT:            return STATUS_NO_MEMORY;
        case EINVAL:            return STATUS_INVALID_PARAMETER;
        case ENFILE:
        case EMFILE:            return STATUS_TOO_MANY_OPENED_FILES;
        case EWOULDBLOCK:       return STATUS_CANT_WAIT;
        case EINPROGRESS:       return STATUS_PENDING;
        case EALREADY:          return STATUS_NETWORK_BUSY;
        case ENOTSOCK:          return STATUS_OBJECT_TYPE_MISMATCH;
        case EDESTADDRREQ:      return STATUS_INVALID_PARAMETER;
        case EMSGSIZE:          return STATUS_BUFFER_OVERFLOW;
        case EPROTONOSUPPORT:
        case ESOCKTNOSUPPORT:
        case EPFNOSUPPORT:
        case EAFNOSUPPORT:
        case EPROTOTYPE:        return STATUS_NOT_SUPPORTED;
        case ENOPROTOOPT:       return STATUS_INVALID_PARAMETER;
        case EOPNOTSUPP:        return STATUS_NOT_SUPPORTED;
        case EADDRINUSE:        return STATUS_ADDRESS_ALREADY_ASSOCIATED;
        case EADDRNOTAVAIL:     return STATUS_INVALID_PARAMETER;
        case ECONNREFUSED:      return STATUS_CONNECTION_REFUSED;
        case ESHUTDOWN:         return STATUS_PIPE_DISCONNECTED;
        case ENOTCONN:          return STATUS_CONNECTION_DISCONNECTED;
        case ETIMEDOUT:         return STATUS_IO_TIMEOUT;
        case ENETUNREACH:       return STATUS_NETWORK_UNREACHABLE;
        case EHOSTUNREACH:      return STATUS_HOST_UNREACHABLE;
        case ENETDOWN:          return STATUS_NETWORK_BUSY;
        case EPIPE:
        case ECONNRESET:        return STATUS_CONNECTION_RESET;
        case ECONNABORTED:      return STATUS_CONNECTION_ABORTED;

        case 0:                 return STATUS_SUCCESS;
        default:
            errno = err;
            perror("wineserver: sock_get_ntstatus() can't map error");
            return STATUS_UNSUCCESSFUL;
    }
}

903 904 905
/* set the last error depending on errno */
static void sock_set_error(void)
{
    set_error( sock_get_ntstatus( errno ) );
}

/* create a socket */
DECL_HANDLER(create_socket)
{
    struct object *obj;

914
    reply->handle = 0;
915
    if ((obj = create_socket( req->family, req->type, req->protocol, req->flags )) != NULL)
916
    {
917
        reply->handle = alloc_handle( current->process, obj, req->access, req->attributes );
918 919 920 921 922 923 924
        release_object( obj );
    }
}

/* accept a socket */
DECL_HANDLER(accept_socket)
{
925
    struct sock *sock;
926

927
    reply->handle = 0;
928
    if ((sock = accept_socket( req->lhandle )) != NULL)
929
    {
930
        reply->handle = alloc_handle( current->process, &sock->obj, req->access, req->attributes );
931 932 933
        sock->wparam = reply->handle;  /* wparam for message is the socket handle */
        sock_reselect( sock );
        release_object( &sock->obj );
934 935 936
    }
}

/* accept a connection from a listening socket into an already created socket */
DECL_HANDLER(accept_into_socket)
{
    const int all_attributes = FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | FILE_READ_DATA;
    struct sock *listener, *dest;

    listener = (struct sock *)get_handle_obj( current->process, req->lhandle,
                                              all_attributes, &sock_ops );
    if (!listener) return;

    dest = (struct sock *)get_handle_obj( current->process, req->ahandle,
                                          all_attributes, &sock_ops );
    if (!dest)
    {
        release_object( listener );
        return;
    }

    if (accept_into_socket( listener, dest ))
    {
        /* the wparam of any notification message is the socket handle */
        dest->wparam = req->ahandle;
        sock_reselect( dest );
    }
    release_object( dest );
    release_object( listener );
}

/* set socket event parameters */
DECL_HANDLER(set_socket_event)
{
    struct sock *sock;
967
    struct event *old_event;
968

969 970
    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                FILE_WRITE_ATTRIBUTES, &sock_ops))) return;
971
    old_event = sock->event;
972
    sock->mask    = req->mask;
973
    sock->hmask   &= ~req->mask; /* re-enable held events */
974 975 976 977 978 979
    sock->event   = NULL;
    sock->window  = req->window;
    sock->message = req->msg;
    sock->wparam  = req->handle;  /* wparam is the socket handle */
    if (req->event) sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );

980
    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);
981

982
    sock_reselect( sock );
983

984
    if (sock->mask)
985
        sock->state |= FD_WINE_NONBLOCKING;
986

987
    /* if a network event is pending, signal the event object
988
       it is possible that FD_CONNECT or FD_ACCEPT network events has happened
989
       before a WSAEventSelect() was done on it.
990
       (when dealing with Asynchronous socket)  */
991
    sock_wake_up( sock );
992 993

    if (old_event) release_object( old_event ); /* we're through with it */
994 995 996 997 998 999 1000
    release_object( &sock->obj );
}

/* get socket event parameters */
DECL_HANDLER(get_socket_event)
{
    struct sock *sock;
1001 1002
    int i;
    int errors[FD_MAX_EVENTS];
1003

1004
    sock = (struct sock *)get_handle_obj( current->process, req->handle, FILE_READ_ATTRIBUTES, &sock_ops );
1005 1006
    if (!sock)
    {
1007 1008 1009 1010
        reply->mask  = 0;
        reply->pmask = 0;
        reply->state = 0;
        return;
1011
    }
1012 1013 1014
    reply->mask  = sock->mask;
    reply->pmask = sock->pmask;
    reply->state = sock->state;
1015 1016 1017 1018
    for (i = 0; i < FD_MAX_EVENTS; i++)
        errors[i] = sock_get_ntstatus(sock->errors[i]);

    set_reply_data( errors, min( get_reply_max_size(), sizeof(errors) ));
1019

1020 1021
    if (req->service)
    {
1022
        if (req->c_event)
1023
        {
1024 1025 1026
            struct event *cevent = get_event_obj( current->process, req->c_event,
                                                  EVENT_MODIFY_STATE );
            if (cevent)
1027 1028 1029 1030
            {
                reset_event( cevent );
                release_object( cevent );
            }
1031
        }
1032 1033
        sock->pmask = 0;
        sock_reselect( sock );
1034 1035 1036 1037 1038 1039 1040 1041 1042
    }
    release_object( &sock->obj );
}

/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
{
    struct sock *sock;

1043
    if (!(sock = (struct sock*)get_handle_obj( current->process, req->handle,
1044
                                               FILE_WRITE_ATTRIBUTES, &sock_ops)))
1045 1046
        return;

1047 1048 1049
    /* for event-based notification, windows erases stale events */
    sock->pmask &= ~req->mask;

1050 1051 1052
    sock->hmask &= ~req->mask;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;
1053
    if ( sock->type != SOCK_STREAM ) sock->state &= ~STREAM_FLAG_MASK;
1054

1055
    sock_reselect( sock );
1056

1057 1058
    release_object( &sock->obj );
}

DECL_HANDLER(set_socket_deferred)
{
    struct sock *sock, *acceptsock;

1064
    sock=(struct sock *)get_handle_obj( current->process, req->handle, FILE_WRITE_ATTRIBUTES, &sock_ops );
1065 1066
    if ( !sock )
        return;
1067

1068
    acceptsock = (struct sock *)get_handle_obj( current->process, req->deferred, 0, &sock_ops );
1069 1070
    if ( !acceptsock )
    {
1071
        release_object( sock );
1072 1073 1074
        return;
    }
    sock->deferred = acceptsock;
1075
    release_object( sock );
1076
}