sock.c 31.8 KB
Newer Older
1 2 3
/*
 * Server-side socket management
 *
4
 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
5
 *
6 7 8 9 10 11 12 13 14 15 16 17
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19
 *
20 21 22 23
 * FIXME: we use read|write access in all cases. Shouldn't we depend that
 * on the access of the current handle?
 */

24 25
#include "config.h"

26 27
#include <assert.h>
#include <fcntl.h>
28
#include <stdarg.h>
29 30 31 32
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
33 34 35
#ifdef HAVE_SYS_ERRNO_H
# include <sys/errno.h>
#endif
36 37
#include <sys/time.h>
#include <sys/types.h>
38 39 40
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
44 45 46
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
47 48 49
#include <time.h>
#include <unistd.h>

50 51
#include "ntstatus.h"
#define WIN32_NO_STATUS
52
#include "windef.h"
53
#include "winternl.h"
54
#include "winerror.h"
55

56
#include "process.h"
57
#include "file.h"
58 59 60
#include "handle.h"
#include "thread.h"
#include "request.h"
61
#include "user.h"
62

63 64 65 66 67 68 69 70 71 72 73
/* From winsock.h */
#define FD_MAX_EVENTS              10
#define FD_READ_BIT                0
#define FD_WRITE_BIT               1
#define FD_OOB_BIT                 2
#define FD_ACCEPT_BIT              3
#define FD_CONNECT_BIT             4
#define FD_CLOSE_BIT               5

/*
 * Define flags to be used with the WSAAsyncSelect() call.
 */
#define FD_READ                    0x00000001
#define FD_WRITE                   0x00000002
#define FD_OOB                     0x00000004
#define FD_ACCEPT                  0x00000008
#define FD_CONNECT                 0x00000010
#define FD_CLOSE                   0x00000020

/* internal per-socket flags, kept in the same bitfield as the FD_* events
 * above; they must not collide with any Windows-visible FD_* value */
#define FD_WINE_LISTENING          0x10000000
#define FD_WINE_NONBLOCKING        0x20000000
#define FD_WINE_CONNECTED          0x40000000
#define FD_WINE_RAW                0x80000000
#define FD_WINE_INTERNAL           0xFFFF0000

/* Constants for WSAIoctl() */
#define WSA_FLAG_OVERLAPPED        0x01
91

92 93 94
/* server-side representation of a winsock socket */
struct sock
{
    struct object       obj;         /* object header */
    struct fd          *fd;          /* socket file descriptor */
    unsigned int        state;       /* status bits (FD_* event and FD_WINE_* flags) */
    unsigned int        mask;        /* event mask requested via WSAAsyncSelect/WSAEventSelect */
    unsigned int        hmask;       /* held (blocked) events, re-enabled when re-selected */
    unsigned int        pmask;       /* pending events not yet reported to the client */
    unsigned int        flags;       /* socket flags (e.g. WSA_FLAG_OVERLAPPED) */
    int                 polling;     /* is socket being polled? */
    unsigned short      type;        /* socket type (SOCK_STREAM, SOCK_DGRAM, ...) */
    unsigned short      family;      /* socket family (AF_INET, ...) */
    struct event       *event;       /* event object signalled on network events */
    user_handle_t       window;      /* window to send the message to */
    unsigned int        message;     /* message to send */
    obj_handle_t        wparam;      /* message wparam (socket handle) */
    int                 errors[FD_MAX_EVENTS]; /* per-event error codes (unix errno values) */
    struct sock        *deferred;    /* socket that waits for a deferred accept */
    struct async_queue *read_q;      /* queue for asynchronous reads */
    struct async_queue *write_q;     /* queue for asynchronous writes */
};

/* object callbacks (see sock_ops below) */
static void sock_dump( struct object *obj, int verbose );
static int sock_signaled( struct object *obj, struct thread *thread );
static struct fd *sock_get_fd( struct object *obj );
static void sock_destroy( struct object *obj );

/* fd callbacks (see sock_fd_ops below) */
static int sock_get_poll_events( struct fd *fd );
static void sock_poll_event( struct fd *fd, int event );
static enum server_fd_type sock_get_fd_type( struct fd *fd );
static void sock_queue_async( struct fd *fd, const async_data_t *data, int type, int count );
static void sock_reselect_async( struct fd *fd, struct async_queue *queue );
static void sock_cancel_async( struct fd *fd, struct process *process, struct thread *thread, client_ptr_t iosb );

/* unix errno -> NTSTATUS / WSA error mapping helpers */
static int sock_get_ntstatus( int err );
static int sock_get_error( int err );
static void sock_set_error(void);

/* object operations vtable for socket objects */
static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    sock_dump,                    /* dump */
    no_get_type,                  /* get_type */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    no_satisfied,                 /* satisfied */
    no_signal,                    /* signal */
    sock_get_fd,                  /* get_fd */
    default_fd_map_access,        /* map_access */
    default_get_sd,               /* get_sd */
    default_set_sd,               /* set_sd */
    no_lookup_name,               /* lookup_name */
    no_open_file,                 /* open_file */
    fd_close_handle,              /* close_handle */
    sock_destroy                  /* destroy */
};

/* fd operations vtable for socket file descriptors */
static const struct fd_ops sock_fd_ops =
{
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    no_flush,                     /* flush */
    sock_get_fd_type,             /* get_fd_type */
    default_fd_ioctl,             /* ioctl */
    sock_queue_async,             /* queue_async */
    sock_reselect_async,          /* reselect_async */
    sock_cancel_async             /* cancel_async */
};

162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180

/* Permutation of 0..FD_MAX_EVENTS - 1 representing the order in which
 * we post messages if there are multiple events.  Used to send
 * messages.  The problem is if there is both a FD_CONNECT event and,
 * say, an FD_READ event available on the same socket, we want to
 * notify the app of the connect event first.  Otherwise it may
 * discard the read event because it thinks it hasn't connected yet.
 */
static const int event_bitorder[FD_MAX_EVENTS] =
{
    FD_CONNECT_BIT,
    FD_ACCEPT_BIT,
    FD_OOB_BIT,
    FD_WRITE_BIT,
    FD_READ_BIT,
    FD_CLOSE_BIT,
    6, 7, 8, 9  /* leftovers */
};

181 182 183 184 185 186 187 188 189 190 191
/* Flags that make sense only for SOCK_STREAM sockets */
#define STREAM_FLAG_MASK ((unsigned int) (FD_CONNECT | FD_ACCEPT | FD_WINE_LISTENING | FD_WINE_CONNECTED))

/* how this platform reports that the peer shut down the connection */
typedef enum {
    SOCK_SHUTDOWN_ERROR = -1,   /* probe failed; behaviour unknown */
    SOCK_SHUTDOWN_EOF = 0,      /* shutdown shows up as a 0-byte read */
    SOCK_SHUTDOWN_POLLHUP = 1   /* shutdown is reported through POLLHUP */
} sock_shutdown_t;

static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR;

192
static sock_shutdown_t sock_check_pollhup(void)
193 194 195 196 197 198
{
    sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR;
    int fd[2], n;
    struct pollfd pfd;
    char dummy;

199 200
    if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) goto out;
    if ( shutdown( fd[0], 1 ) ) goto out;
201 202 203 204 205

    pfd.fd = fd[1];
    pfd.events = POLLIN;
    pfd.revents = 0;

206
    n = poll( &pfd, 1, 0 );
207 208 209 210
    if ( n != 1 ) goto out; /* error or timeout */
    if ( pfd.revents & POLLHUP )
        ret = SOCK_SHUTDOWN_POLLHUP;
    else if ( pfd.revents & POLLIN &&
211
              read( fd[1], &dummy, 1 ) == 0 )
212 213 214
        ret = SOCK_SHUTDOWN_EOF;

out:
215 216
    close( fd[0] );
    close( fd[1] );
217 218 219 220 221
    return ret;
}

void sock_init(void)
{
222
    sock_shutdown_type = sock_check_pollhup();
223 224 225 226

    switch ( sock_shutdown_type )
    {
    case SOCK_SHUTDOWN_EOF:
227
        if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" );
228 229
        break;
    case SOCK_SHUTDOWN_POLLHUP:
230
        if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" );
231 232
        break;
    default:
233
        fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" );
234 235 236
        sock_shutdown_type = SOCK_SHUTDOWN_EOF;
    }
}
237

238
static int sock_reselect( struct sock *sock )
239
{
240
    int ev = sock_get_poll_events( sock->fd );
241

242
    if (debug_level)
243
        fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev);
244

245 246
    if (!sock->polling)  /* FIXME: should find a better way to do this */
    {
247
        /* previously unconnected socket, is this reselect supposed to connect it? */
248
        if (!(sock->state & ~FD_WINE_NONBLOCKING)) return 0;
249
        /* ok, it is, attach it to the wineserver's main poll loop */
250
        sock->polling = 1;
251 252
    }
    /* update condition mask */
253
    set_fd_events( sock->fd, ev );
254 255 256 257 258
    return ev;
}

/* After POLLHUP is received, the socket will no longer be in the main select loop.
   This function is used to signal pending events nevertheless */
259
static void sock_try_event( struct sock *sock, int event )
260
{
261
    event = check_fd_events( sock->fd, event );
262
    if (event)
263
    {
264 265
        if ( debug_level ) fprintf( stderr, "sock_try_event: %x\n", event );
        sock_poll_event( sock->fd, event );
266
    }
267
}
268

269
/* wake anybody waiting on the socket event or send the associated message */
static void sock_wake_up( struct sock *sock, int pollev )
{
    unsigned int events = sock->pmask & sock->mask;
    int i;
    int async_active = 0;

    /* first wake any asyncs that can now make progress */
    if ( pollev & (POLLIN|POLLPRI|POLLERR|POLLHUP) && async_waiting( sock->read_q ))
    {
        if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
        async_wake_up( sock->read_q, STATUS_ALERTED );
        async_active = 1;
    }
    if ( pollev & (POLLOUT|POLLERR|POLLHUP) && async_waiting( sock->write_q ))
    {
        if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
        async_wake_up( sock->write_q, STATUS_ALERTED );
        async_active = 1;
    }

    /* Do not signal events if there are still pending asynchronous IO requests */
    /* We need this to delay FD_CLOSE events until all pending overlapped requests are processed */
    if ( !events || async_active ) return;

    if (sock->event)
    {
        /* WSAEventSelect-style notification: just set the event object */
        if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
        set_event( sock->event );
    }
    if (sock->window)
    {
        /* WSAAsyncSelect-style notification: post one message per pending
         * event, in event_bitorder so e.g. FD_CONNECT precedes FD_READ */
        if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window );
        for (i = 0; i < FD_MAX_EVENTS; i++)
        {
            int event = event_bitorder[i];
            if (sock->pmask & (1 << event))
            {
                /* lparam low word = event bit, high word = WSA error code */
                lparam_t lparam = (1 << event) | (sock_get_error(sock->errors[event]) << 16);
                post_message( sock->window, sock->message, sock->wparam, lparam );
            }
        }
        sock->pmask = 0;
        sock_reselect( sock );
    }
}

315
/* retrieve the pending socket error via SO_ERROR; returns 0 when none.
 * Note SO_ERROR is cleared by the kernel as a side effect of reading it. */
static inline int sock_error( struct fd *fd )
{
    int optval = 0;
    /* POSIX requires socklen_t here; the previous 'unsigned int' only
     * happened to match on some platforms */
    socklen_t optlen = sizeof(optval);

    /* on getsockopt failure optval stays 0, i.e. "no error" */
    getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
    return optval;
}

324
/* main poll() callback: translate unix poll events into winsock FD_* events,
 * update the socket state machine and notify waiters */
static void sock_poll_event( struct fd *fd, int event )
{
    struct sock *sock = get_fd_user( fd );
    int hangup_seen = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %p select event: %x\n", sock, event);

    /* we may change event later, remove from loop here */
    if (event & (POLLERR|POLLHUP)) set_fd_events( sock->fd, -1 );

    if (sock->state & FD_CONNECT)
    {
        /* connecting */
        if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %p connection success\n", sock);
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = sock_error( fd );
            if (debug_level)
                fprintf(stderr, "socket %p connection failure\n", sock);
        }
    }
    else if (sock->state & FD_WINE_LISTENING)
    {
        /* listening */
        if (event & POLLIN)
        {
            /* incoming connection */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = 0;
            sock->hmask |= FD_ACCEPT;
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* failed incoming connection? */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = sock_error( fd );
            sock->hmask |= FD_ACCEPT;
        }
    }
    else
    {
        /* normal data flow */
        if ( sock->type == SOCK_STREAM && ( event & POLLIN ) )
        {
            char dummy;
            int nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr  = recv( get_unix_fd( fd ), &dummy, 1, MSG_PEEK );
            if ( nr == 0 )
            {
                /* 0-byte read on a stream socket means the peer closed */
                hangup_seen = 1;
                event &= ~POLLIN;
            }
            else if ( nr < 0 )
            {
                event &= ~POLLIN;
                /* EAGAIN can happen if an async recv() falls between the server's poll()
                   call and the invocation of this routine */
                if ( errno != EAGAIN )
                {
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %p: %d\n", sock, errno );
                    event |= POLLERR;
                }
            }
        }

        if ( event & POLLIN )
        {
            sock->pmask |= FD_READ;
            sock->hmask |= FD_READ;   /* hold FD_READ until the client re-enables it */
            sock->errors[FD_READ_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %p is readable\n", sock );
        }

        if (event & POLLOUT)
        {
            sock->pmask |= FD_WRITE;
            sock->hmask |= FD_WRITE;
            sock->errors[FD_WRITE_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %p is writable\n", sock);
        }
        if (event & POLLPRI)
        {
            sock->pmask |= FD_OOB;
            sock->hmask |= FD_OOB;
            sock->errors[FD_OOB_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %p got OOB data\n", sock);
        }

        /* turn a hangup or hard error into FD_CLOSE and drop read/write state */
        if ( (hangup_seen || event & (POLLHUP|POLLERR)) && (sock->state & (FD_READ|FD_WRITE)) )
        {
            sock->errors[FD_CLOSE_BIT] = sock_error( fd );
            if ( (event & POLLERR) || ( sock_shutdown_type == SOCK_SHUTDOWN_EOF && (event & POLLHUP) ))
                sock->state &= ~FD_WRITE;
            sock->state &= ~FD_READ;

            sock->pmask |= FD_CLOSE;
            sock->hmask |= FD_CLOSE;
            if (debug_level)
                fprintf(stderr, "socket %p aborted by error %d, event: %x - removing from select loop\n",
                        sock, sock->errors[FD_CLOSE_BIT], event);
        }

        if (hangup_seen)
            event |= POLLHUP;
    }

    /* wake up anyone waiting for whatever just happened */
    if ( sock->pmask & sock->mask || sock->flags & WSA_FLAG_OVERLAPPED ) sock_wake_up( sock, event );

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );

    sock_reselect( sock );
}

static void sock_dump( struct object *obj, int verbose )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
465
    printf( "Socket fd=%p, state=%x, mask=%x, pending=%x, held=%x\n",
466
            sock->fd, sock->state,
467
            sock->mask, sock->pmask, sock->hmask );
468 469
}

470
static int sock_signaled( struct object *obj, struct thread *thread )
471
{
472
    struct sock *sock = (struct sock *)obj;
473 474
    assert( obj->ops == &sock_ops );

475
    return check_fd_events( sock->fd, sock_get_poll_events( sock->fd ) ) != 0;
476 477
}

478
/* compute the unix poll() mask we should wait for on this socket, based on
 * the client's event mask, current state, and held (already-reported) events */
static int sock_get_poll_events( struct fd *fd )
{
    struct sock *sock = get_fd_user( fd );
    /* only events the client asked for, that are possible, and not held */
    unsigned int mask = sock->mask & sock->state & ~sock->hmask;
    int ev = 0;

    assert( sock->obj.ops == &sock_ops );

    if (sock->state & FD_CONNECT)
        /* connecting, wait for writable */
        return POLLOUT;
    if (sock->state & FD_WINE_LISTENING)
        /* listening, wait for readable */
        return (sock->hmask & FD_ACCEPT) ? 0 : POLLIN;

    /* also poll if an async is blocked on the corresponding direction */
    if (mask & FD_READ  || async_waiting( sock->read_q )) ev |= POLLIN | POLLPRI;
    if (mask & FD_WRITE || async_waiting( sock->write_q )) ev |= POLLOUT;
    /* We use POLLIN with 0 bytes recv() as FD_CLOSE indication for stream sockets. */
    if ( sock->type == SOCK_STREAM && ( sock->mask & ~sock->hmask & FD_CLOSE) &&
         !(sock->hmask & FD_READ) && sock->state & FD_READ )
        ev |= POLLIN;

    return ev;
}

503
/* all sockets report the same fd type */
static enum server_fd_type sock_get_fd_type( struct fd *fd )
{
    return FD_TYPE_SOCKET;
}

508
/* queue an asynchronous read or write request on the socket */
static void sock_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
{
    struct sock *sock = get_fd_user( fd );
    struct async_queue *queue;
    int pollev;

    assert( sock->obj.ops == &sock_ops );

    /* pick (and lazily create) the queue matching the request direction */
    switch (type)
    {
    case ASYNC_TYPE_READ:
        if (!sock->read_q && !(sock->read_q = create_async_queue( sock->fd ))) return;
        queue = sock->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        if (!sock->write_q && !(sock->write_q = create_async_queue( sock->fd ))) return;
        queue = sock->write_q;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* refuse the async if the socket can no longer do I/O in that direction */
    if ( ( !( sock->state & FD_READ ) && type == ASYNC_TYPE_READ  ) ||
         ( !( sock->state & FD_WRITE ) && type == ASYNC_TYPE_WRITE ) )
    {
        set_error( STATUS_PIPE_DISCONNECTED );
    }
    else
    {
        struct async *async;
        if (!(async = create_async( current, queue, data ))) return;
        release_object( async );  /* the queue keeps its own reference */
        set_error( STATUS_PENDING );
    }

    /* re-poll in either case; the async may be satisfiable immediately */
    pollev = sock_reselect( sock );
    if ( pollev ) sock_try_event( sock, pollev );
}

548 549 550 551 552 553 554
/* re-evaluate the poll mask after an async queue changed, and deliver
 * any event that is already pending */
static void sock_reselect_async( struct fd *fd, struct async_queue *queue )
{
    struct sock *sock = get_fd_user( fd );
    int pollev = sock_reselect( sock );

    if (pollev) sock_try_event( sock, pollev );
}

555
/* cancel pending asyncs on the socket; if an iosb was given and nothing
 * matched, report STATUS_NOT_FOUND */
static void sock_cancel_async( struct fd *fd, struct process *process, struct thread *thread, client_ptr_t iosb )
{
    struct sock *sock = get_fd_user( fd );
    int woken = 0;

    assert( sock->obj.ops == &sock_ops );

    woken += async_wake_up_by( sock->read_q, process, thread, iosb, STATUS_CANCELLED );
    woken += async_wake_up_by( sock->write_q, process, thread, iosb, STATUS_CANCELLED );
    if (!woken && iosb)
        set_error( STATUS_NOT_FOUND );
}

567 568 569 570 571 572
/* return a new reference to the socket's fd object */
static struct fd *sock_get_fd( struct object *obj )
{
    struct sock *s = (struct sock *)obj;
    return (struct fd *)grab_object( s->fd );
}

573 574 575 576 577 578
/* release everything a socket owns when its last handle goes away */
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    free_async_queue( sock->read_q );
    free_async_queue( sock->write_q );
    if (sock->event) release_object( sock->event );
    if (sock->fd)
    {
        /* shut the socket down to force pending poll() calls in the client to return */
        shutdown( get_unix_fd(sock->fd), SHUT_RDWR );
        release_object( sock->fd );
    }
}

/* create a new and unconnected socket; returns NULL and sets the last
 * error on failure */
static struct object *create_socket( int family, int type, int protocol, unsigned int flags )
{
    struct sock *sock;
    int sockfd;

    sockfd = socket( family, type, protocol );
    if (debug_level)
        fprintf(stderr,"socket(%d,%d,%d)=%d\n",family,type,protocol,sockfd);
    if (sockfd == -1)
    {
        sock_set_error();
        return NULL;
    }
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    if (!(sock = alloc_object( &sock_ops )))
    {
        close( sockfd );
        return NULL;
    }
    /* datagram sockets can read/write immediately; stream sockets only
     * after connect/accept */
    sock->state = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
    sock->mask    = 0;
    sock->hmask   = 0;
    sock->pmask   = 0;
    sock->polling = 0;
    sock->flags   = flags;
    sock->type    = type;
    sock->family  = family;
    sock->event   = NULL;
    sock->window  = 0;
    sock->message = 0;
    sock->wparam  = 0;
    sock->deferred = NULL;
    sock->read_q  = NULL;
    sock->write_q = NULL;
    memset( sock->errors, 0, sizeof(sock->errors) );
    if (!(sock->fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj,
                            (flags & WSA_FLAG_OVERLAPPED) ? 0 : FILE_SYNCHRONOUS_IO_NONALERT )))
    {
        release_object( sock );
        return NULL;
    }
    sock_reselect( sock );
    clear_error();
    return &sock->obj;
}

/* accept a socket (creates a new fd); consumes a deferred accept first,
 * otherwise performs a real accept(2) on the listening socket */
static struct sock *accept_socket( obj_handle_t handle )
{
    struct sock *acceptsock;
    struct sock *sock;
    int	acceptfd;
    struct sockaddr	saddr;

    sock = (struct sock *)get_handle_obj( current->process, handle, FILE_READ_DATA, &sock_ops );
    if (!sock)
    	return NULL;

    if ( sock->deferred )
    {
        /* reuse the connection stashed by a previous deferred accept */
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    }
    else
    {

        /* Try to accept(2). We can't be safe that this an already connected socket
         * or that accept() is allowed on it. In those cases we will get -1/errno
         * return.
         */
        unsigned int slen = sizeof(saddr);
        acceptfd = accept( get_unix_fd(sock->fd), &saddr, &slen);
        if (acceptfd==-1)
        {
            sock_set_error();
            release_object( sock );
            return NULL;
        }
        if (!(acceptsock = alloc_object( &sock_ops )))
        {
            close( acceptfd );
            release_object( sock );
            return NULL;
        }

        /* newly created socket gets the same properties of the listening socket */
        fcntl(acceptfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
        acceptsock->state  = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
        if (sock->state & FD_WINE_NONBLOCKING)
            acceptsock->state |= FD_WINE_NONBLOCKING;
        acceptsock->mask    = sock->mask;
        acceptsock->hmask   = 0;
        acceptsock->pmask   = 0;
        acceptsock->polling = 0;
        acceptsock->type    = sock->type;
        acceptsock->family  = sock->family;
        acceptsock->event   = NULL;
        acceptsock->window  = sock->window;
        acceptsock->message = sock->message;
        acceptsock->wparam  = 0;   /* filled in by the request handler with the new handle */
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags = sock->flags;
        acceptsock->deferred = NULL;
        acceptsock->read_q  = NULL;
        acceptsock->write_q = NULL;
        memset( acceptsock->errors, 0, sizeof(acceptsock->errors) );
        if (!(acceptsock->fd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                                    get_fd_options( sock->fd ) )))
        {
            release_object( acceptsock );
            release_object( sock );
            return NULL;
        }
    }
    /* the pending accept has been consumed; re-arm the listening socket */
    clear_error();
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return acceptsock;
}

/* map a unix errno value onto the corresponding WSA error code */
static int sock_get_error( int err )
{
    switch (err)
    {
        case EINTR:             return WSAEINTR;
        case EBADF:             return WSAEBADF;
        case EPERM:
        case EACCES:            return WSAEACCES;
        case EFAULT:            return WSAEFAULT;
        case EINVAL:            return WSAEINVAL;
        case EMFILE:            return WSAEMFILE;
        case EWOULDBLOCK:       return WSAEWOULDBLOCK;
        case EINPROGRESS:       return WSAEINPROGRESS;
        case EALREADY:          return WSAEALREADY;
        case ENOTSOCK:          return WSAENOTSOCK;
        case EDESTADDRREQ:      return WSAEDESTADDRREQ;
        case EMSGSIZE:          return WSAEMSGSIZE;
        case EPROTOTYPE:        return WSAEPROTOTYPE;
        case ENOPROTOOPT:       return WSAENOPROTOOPT;
        case EPROTONOSUPPORT:   return WSAEPROTONOSUPPORT;
        case ESOCKTNOSUPPORT:   return WSAESOCKTNOSUPPORT;
        case EOPNOTSUPP:        return WSAEOPNOTSUPP;
        case EPFNOSUPPORT:      return WSAEPFNOSUPPORT;
        case EAFNOSUPPORT:      return WSAEAFNOSUPPORT;
        case EADDRINUSE:        return WSAEADDRINUSE;
        case EADDRNOTAVAIL:     return WSAEADDRNOTAVAIL;
        case ENETDOWN:          return WSAENETDOWN;
        case ENETUNREACH:       return WSAENETUNREACH;
        case ENETRESET:         return WSAENETRESET;
        case ECONNABORTED:      return WSAECONNABORTED;
        case EPIPE:
        case ECONNRESET:        return WSAECONNRESET;
        case ENOBUFS:           return WSAENOBUFS;
        case EISCONN:           return WSAEISCONN;
        case ENOTCONN:          return WSAENOTCONN;
        case ESHUTDOWN:         return WSAESHUTDOWN;
        case ETOOMANYREFS:      return WSAETOOMANYREFS;
        case ETIMEDOUT:         return WSAETIMEDOUT;
        case ECONNREFUSED:      return WSAECONNREFUSED;
        case ELOOP:             return WSAELOOP;
        case ENAMETOOLONG:      return WSAENAMETOOLONG;
        case EHOSTDOWN:         return WSAEHOSTDOWN;
        case EHOSTUNREACH:      return WSAEHOSTUNREACH;
        case ENOTEMPTY:         return WSAENOTEMPTY;
#ifdef EPROCLIM
        case EPROCLIM:          return WSAEPROCLIM;
#endif
#ifdef EUSERS
        case EUSERS:            return WSAEUSERS;
#endif
#ifdef EDQUOT
        case EDQUOT:            return WSAEDQUOT;
#endif
#ifdef ESTALE
        case ESTALE:            return WSAESTALE;
#endif
#ifdef EREMOTE
        case EREMOTE:           return WSAEREMOTE;
#endif

        case 0:                 return 0;
        default:
            /* unknown errno: log it so the mapping table can be extended */
            errno = err;
            perror("wineserver: sock_get_error() can't map error");
            return WSAEFAULT;
    }
}

786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830
/* map a unix errno value onto the corresponding NTSTATUS code */
static int sock_get_ntstatus( int err )
{
    switch ( err )
    {
        case EBADF:             return STATUS_INVALID_HANDLE;
        case EBUSY:             return STATUS_DEVICE_BUSY;
        case EPERM:
        case EACCES:            return STATUS_ACCESS_DENIED;
        case EFAULT:            return STATUS_NO_MEMORY;
        case EINVAL:            return STATUS_INVALID_PARAMETER;
        case ENFILE:
        case EMFILE:            return STATUS_TOO_MANY_OPENED_FILES;
        case EWOULDBLOCK:       return STATUS_CANT_WAIT;
        case EINPROGRESS:       return STATUS_PENDING;
        case EALREADY:          return STATUS_NETWORK_BUSY;
        case ENOTSOCK:          return STATUS_OBJECT_TYPE_MISMATCH;
        case EDESTADDRREQ:      return STATUS_INVALID_PARAMETER;
        case EMSGSIZE:          return STATUS_BUFFER_OVERFLOW;
        case EPROTONOSUPPORT:
        case ESOCKTNOSUPPORT:
        case EPFNOSUPPORT:
        case EAFNOSUPPORT:
        case EPROTOTYPE:        return STATUS_NOT_SUPPORTED;
        case ENOPROTOOPT:       return STATUS_INVALID_PARAMETER;
        case EOPNOTSUPP:        return STATUS_NOT_SUPPORTED;
        case EADDRINUSE:        return STATUS_ADDRESS_ALREADY_ASSOCIATED;
        case EADDRNOTAVAIL:     return STATUS_INVALID_PARAMETER;
        case ECONNREFUSED:      return STATUS_CONNECTION_REFUSED;
        case ESHUTDOWN:         return STATUS_PIPE_DISCONNECTED;
        case ENOTCONN:          return STATUS_CONNECTION_DISCONNECTED;
        case ETIMEDOUT:         return STATUS_IO_TIMEOUT;
        case ENETUNREACH:       return STATUS_NETWORK_UNREACHABLE;
        case ENETDOWN:          return STATUS_NETWORK_BUSY;
        case EPIPE:
        case ECONNRESET:        return STATUS_CONNECTION_RESET;
        case ECONNABORTED:      return STATUS_CONNECTION_ABORTED;

        case 0:                 return STATUS_SUCCESS;
        default:
            /* unknown errno: log it so the mapping table can be extended */
            errno = err;
            perror("wineserver: sock_get_ntstatus() can't map error");
            return STATUS_UNSUCCESSFUL;
    }
}

831 832 833
/* set the last error depending on errno */
static void sock_set_error(void)
{
    set_error( sock_get_ntstatus( errno ) );
}

837 838 839 840 841
/* create a socket */
DECL_HANDLER(create_socket)
{
    struct object *obj;

842
    reply->handle = 0;
843
    if ((obj = create_socket( req->family, req->type, req->protocol, req->flags )) != NULL)
844
    {
845
        reply->handle = alloc_handle( current->process, obj, req->access, req->attributes );
846 847 848 849 850 851 852
        release_object( obj );
    }
}

/* accept a socket */
DECL_HANDLER(accept_socket)
{
853
    struct sock *sock;
854

855
    reply->handle = 0;
856
    if ((sock = accept_socket( req->lhandle )) != NULL)
857
    {
858
        reply->handle = alloc_handle( current->process, &sock->obj, req->access, req->attributes );
859 860 861
        sock->wparam = reply->handle;  /* wparam for message is the socket handle */
        sock_reselect( sock );
        release_object( &sock->obj );
862 863 864 865 866 867 868
    }
}

/* set socket event parameters */
DECL_HANDLER(set_socket_event)
{
    struct sock *sock;
869
    struct event *old_event;
870
    int pollev;
871

872 873
    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                FILE_WRITE_ATTRIBUTES, &sock_ops))) return;
874
    old_event = sock->event;
875
    sock->mask    = req->mask;
876
    sock->hmask   &= ~req->mask; /* re-enable held events */
877 878 879 880 881 882
    sock->event   = NULL;
    sock->window  = req->window;
    sock->message = req->msg;
    sock->wparam  = req->handle;  /* wparam is the socket handle */
    if (req->event) sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );

883
    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);
884 885

    pollev = sock_reselect( sock );
886
    if ( pollev ) sock_try_event( sock, pollev );
887

888
    if (sock->mask)
889
        sock->state |= FD_WINE_NONBLOCKING;
890

891
    /* if a network event is pending, signal the event object
892
       it is possible that FD_CONNECT or FD_ACCEPT network events has happened
893
       before a WSAEventSelect() was done on it.
894
       (when dealing with Asynchronous socket)  */
895
    if (sock->pmask & sock->mask) sock_wake_up( sock, pollev );
896 897

    if (old_event) release_object( old_event ); /* we're through with it */
898 899 900 901 902 903 904
    release_object( &sock->obj );
}

/* get socket event parameters */
DECL_HANDLER(get_socket_event)
{
    struct sock *sock;
905 906
    int i;
    int errors[FD_MAX_EVENTS];
907

908
    sock = (struct sock *)get_handle_obj( current->process, req->handle, FILE_READ_ATTRIBUTES, &sock_ops );
909 910
    if (!sock)
    {
911 912 913 914
        reply->mask  = 0;
        reply->pmask = 0;
        reply->state = 0;
        return;
915
    }
916 917 918
    reply->mask  = sock->mask;
    reply->pmask = sock->pmask;
    reply->state = sock->state;
919 920 921 922
    for (i = 0; i < FD_MAX_EVENTS; i++)
        errors[i] = sock_get_ntstatus(sock->errors[i]);

    set_reply_data( errors, min( get_reply_max_size(), sizeof(errors) ));
923

924 925
    if (req->service)
    {
926
        if (req->c_event)
927
        {
928 929 930
            struct event *cevent = get_event_obj( current->process, req->c_event,
                                                  EVENT_MODIFY_STATE );
            if (cevent)
931 932 933 934
            {
                reset_event( cevent );
                release_object( cevent );
            }
935
        }
936 937
        sock->pmask = 0;
        sock_reselect( sock );
938 939 940 941 942 943 944 945
    }
    release_object( &sock->obj );
}

/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
{
    struct sock *sock;
946
    int pollev;
947

948
    if (!(sock = (struct sock*)get_handle_obj( current->process, req->handle,
949
                                               FILE_WRITE_ATTRIBUTES, &sock_ops)))
950 951
        return;

952 953 954 955
    sock->pmask &= ~req->mask; /* is this safe? */
    sock->hmask &= ~req->mask;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;
956
    if ( sock->type != SOCK_STREAM ) sock->state &= ~STREAM_FLAG_MASK;
957 958

    pollev = sock_reselect( sock );
959
    if ( pollev ) sock_try_event( sock, pollev );
960

961 962
    release_object( &sock->obj );
}
963 964 965 966 967

DECL_HANDLER(set_socket_deferred)
{
    struct sock *sock, *acceptsock;

968
    sock=(struct sock *)get_handle_obj( current->process, req->handle, FILE_WRITE_ATTRIBUTES, &sock_ops );
969 970
    if ( !sock )
        return;
971

972
    acceptsock = (struct sock *)get_handle_obj( current->process, req->deferred, 0, &sock_ops );
973 974
    if ( !acceptsock )
    {
975
        release_object( sock );
976 977 978
        return;
    }
    sock->deferred = acceptsock;
979
    release_object( sock );
980
}