/*
 * Server-side socket management
 *
 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * FIXME: we use read|write access in all cases. Shouldn't we depend that
 * on the access of the current handle?
 */

#include "config.h"

#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#ifdef HAVE_POLL_H
# include <poll.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
#include <time.h>
#include <unistd.h>
#include <limits.h>
#ifdef HAVE_LINUX_RTNETLINK_H
# include <linux/rtnetlink.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "winerror.h"
#define USE_WS_PREFIX
#include "winsock2.h"

#include "process.h"
#include "file.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "user.h"

69 70 71 72 73 74 75 76 77 78 79
/* From winsock.h */
#define FD_MAX_EVENTS              10
#define FD_READ_BIT                0
#define FD_WRITE_BIT               1
#define FD_OOB_BIT                 2
#define FD_ACCEPT_BIT              3
#define FD_CONNECT_BIT             4
#define FD_CLOSE_BIT               5

/*
 * Define flags to be used with the WSAAsyncSelect() call.
 * Each FD_* flag is (1 << FD_*_BIT) from the table above.
 */
#define FD_READ                    0x00000001
#define FD_WRITE                   0x00000002
#define FD_OOB                     0x00000004
#define FD_ACCEPT                  0x00000008
#define FD_CONNECT                 0x00000010
#define FD_CLOSE                   0x00000020

/* internal per-socket flags (kept in the same bitmask as the FD_* events) */
#define FD_WINE_LISTENING          0x10000000
#define FD_WINE_NONBLOCKING        0x20000000
#define FD_WINE_CONNECTED          0x40000000
#define FD_WINE_RAW                0x80000000
#define FD_WINE_INTERNAL           0xFFFF0000

95 96 97
/* server-side state for one socket handle */
struct sock
{
    struct object       obj;         /* object header */
    struct fd          *fd;          /* socket file descriptor */
    unsigned int        state;       /* status bits (FD_* and FD_WINE_* flags) */
    unsigned int        mask;        /* event mask */
    unsigned int        hmask;       /* held (blocked) events */
    unsigned int        pmask;       /* pending events */
    unsigned int        flags;       /* socket flags */
    int                 polling;     /* is socket being polled? */
    unsigned short      proto;       /* socket protocol */
    unsigned short      type;        /* socket type */
    unsigned short      family;      /* socket family */
    struct event       *event;       /* event object */
    user_handle_t       window;      /* window to send the message to */
    unsigned int        message;     /* message to send */
    obj_handle_t        wparam;      /* message wparam (socket handle) */
    int                 errors[FD_MAX_EVENTS]; /* event errors, indexed by FD_*_BIT */
    timeout_t           connect_time;/* time the socket was connected */
    struct sock        *deferred;    /* socket that waits for a deferred accept */
    struct async_queue *read_q;      /* queue for asynchronous reads */
    struct async_queue *write_q;     /* queue for asynchronous writes */
    struct async_queue *ifchange_q;  /* queue for interface change notifications */
    struct object      *ifchange_obj; /* the interface change notification object */
    struct list         ifchange_entry; /* entry in ifchange notification list */
};

/* object operations */
static void sock_dump( struct object *obj, int verbose );
static int sock_signaled( struct object *obj, struct wait_queue_entry *entry );
static struct fd *sock_get_fd( struct object *obj );
static void sock_destroy( struct object *obj );
static struct async_queue *sock_get_ifchange_q( struct sock *sock );
static void sock_destroy_ifchange_q( struct sock *sock );

/* fd operations */
static int sock_get_poll_events( struct fd *fd );
static void sock_poll_event( struct fd *fd, int event );
static enum server_fd_type sock_get_fd_type( struct fd *fd );
static obj_handle_t sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void sock_queue_async( struct fd *fd, struct async *async, int type, int count );
static void sock_reselect_async( struct fd *fd, struct async_queue *queue );

/* error-code mapping helpers */
static int sock_get_ntstatus( int err );
static int sock_get_error( int err );
static void sock_set_error(void);

/* object operation table for socket objects */
static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    sock_dump,                    /* dump */
    no_get_type,                  /* get_type */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    no_satisfied,                 /* satisfied */
    no_signal,                    /* signal */
    sock_get_fd,                  /* get_fd */
    default_fd_map_access,        /* map_access */
    default_get_sd,               /* get_sd */
    default_set_sd,               /* set_sd */
    no_lookup_name,               /* lookup_name */
    no_link_name,                 /* link_name */
    NULL,                         /* unlink_name */
    no_open_file,                 /* open_file */
    fd_close_handle,              /* close_handle */
    sock_destroy                  /* destroy */
};

/* fd operation table for socket objects; reads/writes go through the client side */
static const struct fd_ops sock_fd_ops =
{
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    sock_get_fd_type,             /* get_fd_type */
    no_fd_read,                   /* read */
    no_fd_write,                  /* write */
    no_fd_flush,                  /* flush */
    sock_ioctl,                   /* ioctl */
    sock_queue_async,             /* queue_async */
    sock_reselect_async           /* reselect_async */
};

175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193

/* Permutation of 0..FD_MAX_EVENTS - 1 representing the order in which
 * we post messages if there are multiple events.  Used to send
 * messages.  The problem is if there is both a FD_CONNECT event and,
 * say, an FD_READ event available on the same socket, we want to
 * notify the app of the connect event first.  Otherwise it may
 * discard the read event because it thinks it hasn't connected yet.
 */
static const int event_bitorder[FD_MAX_EVENTS] =
{
    FD_CONNECT_BIT,
    FD_ACCEPT_BIT,
    FD_OOB_BIT,
    FD_WRITE_BIT,
    FD_READ_BIT,
    FD_CLOSE_BIT,
    6, 7, 8, 9  /* leftovers */
};

194 195 196 197 198 199 200 201 202 203 204
/* Flags that make sense only for SOCK_STREAM sockets */
#define STREAM_FLAG_MASK ((unsigned int) (FD_CONNECT | FD_ACCEPT | FD_WINE_LISTENING | FD_WINE_CONNECTED))

/* how this platform reports a peer shutdown; probed once by sock_check_pollhup() */
typedef enum {
    SOCK_SHUTDOWN_ERROR = -1,   /* probe failed / not yet determined */
    SOCK_SHUTDOWN_EOF = 0,      /* shutdown reported as POLLIN + 0-byte read */
    SOCK_SHUTDOWN_POLLHUP = 1   /* shutdown reported as POLLHUP */
} sock_shutdown_t;

static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR;

205
static sock_shutdown_t sock_check_pollhup(void)
206 207 208 209 210 211
{
    sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR;
    int fd[2], n;
    struct pollfd pfd;
    char dummy;

212
    if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) return ret;
213
    if ( shutdown( fd[0], 1 ) ) goto out;
214 215 216 217 218

    pfd.fd = fd[1];
    pfd.events = POLLIN;
    pfd.revents = 0;

219 220
    /* Solaris' poll() sometimes returns nothing if given a 0ms timeout here */
    n = poll( &pfd, 1, 1 );
221 222 223 224
    if ( n != 1 ) goto out; /* error or timeout */
    if ( pfd.revents & POLLHUP )
        ret = SOCK_SHUTDOWN_POLLHUP;
    else if ( pfd.revents & POLLIN &&
225
              read( fd[1], &dummy, 1 ) == 0 )
226 227 228
        ret = SOCK_SHUTDOWN_EOF;

out:
229 230
    close( fd[0] );
    close( fd[1] );
231 232 233 234 235
    return ret;
}

void sock_init(void)
{
236
    sock_shutdown_type = sock_check_pollhup();
237 238 239 240

    switch ( sock_shutdown_type )
    {
    case SOCK_SHUTDOWN_EOF:
241
        if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" );
242 243
        break;
    case SOCK_SHUTDOWN_POLLHUP:
244
        if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" );
245 246
        break;
    default:
247
        fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" );
248 249 250
        sock_shutdown_type = SOCK_SHUTDOWN_EOF;
    }
}
251

252
static int sock_reselect( struct sock *sock )
253
{
254
    int ev = sock_get_poll_events( sock->fd );
255

256
    if (debug_level)
257
        fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev);
258

259 260
    if (!sock->polling)  /* FIXME: should find a better way to do this */
    {
261
        /* previously unconnected socket, is this reselect supposed to connect it? */
262
        if (!(sock->state & ~FD_WINE_NONBLOCKING)) return 0;
263
        /* ok, it is, attach it to the wineserver's main poll loop */
264
        sock->polling = 1;
265
        allow_fd_caching( sock->fd );
266 267
    }
    /* update condition mask */
268
    set_fd_events( sock->fd, ev );
269 270 271
    return ev;
}

272
/* wake anybody waiting on the socket event or send the associated message */
static void sock_wake_up( struct sock *sock )
{
    unsigned int events = sock->pmask & sock->mask;
    int i;

    /* nothing both pending and selected by the app: nothing to report */
    if ( !events ) return;

    if (sock->event)
    {
        if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
        set_event( sock->event );
    }
    if (sock->window)
    {
        if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window );
        /* post one message per pending event, in event_bitorder priority
         * (e.g. FD_CONNECT before FD_READ) */
        for (i = 0; i < FD_MAX_EVENTS; i++)
        {
            int event = event_bitorder[i];
            if (sock->pmask & (1 << event))
            {
                /* low word: event flag; high word: WSA error for that event */
                lparam_t lparam = (1 << event) | (sock_get_error(sock->errors[event]) << 16);
                post_message( sock->window, sock->message, sock->wparam, lparam );
            }
        }
        sock->pmask = 0;
        sock_reselect( sock );
    }
}

302
/* retrieve (and clear) the pending socket error via SO_ERROR */
static inline int sock_error( struct fd *fd )
{
    unsigned int optval = 0;
    socklen_t optlen = sizeof(optval);

    /* previously the getsockopt() result was ignored, silently returning a
     * stale 0 on failure; report the getsockopt errno instead */
    if (getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen ) == -1)
        return errno;
    return optval;
}

311
/* hand poll events to queued asyncs on an overlapped socket; returns the
 * events still left to report through the select/AsyncSelect machinery */
static int sock_dispatch_asyncs( struct sock *sock, int event, int error )
{
    if ( sock->flags & WSA_FLAG_OVERLAPPED )
    {
        /* readable (or OOB) data with a waiting async read: let the async
         * consume it and strip the bits so select won't also report them */
        if ( event & (POLLIN|POLLPRI) && async_waiting( sock->read_q ) )
        {
            if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
            async_wake_up( sock->read_q, STATUS_ALERTED );
            event &= ~(POLLIN|POLLPRI);
        }
        if ( event & POLLOUT && async_waiting( sock->write_q ) )
        {
            if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
            async_wake_up( sock->write_q, STATUS_ALERTED );
            event &= ~POLLOUT;
        }
        /* on error/hangup, fail asyncs on directions that are shut down */
        if ( event & (POLLERR|POLLHUP) )
        {
            int status = sock_get_ntstatus( error );

            if ( !(sock->state & FD_READ) )
                async_wake_up( sock->read_q, status );
            if ( !(sock->state & FD_WRITE) )
                async_wake_up( sock->write_q, status );
        }
    }
    return event;
}

340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387
/* translate poll() results into pending/held FD_* events, record the
 * per-event errors, and notify any waiters */
static void sock_dispatch_events( struct sock *sock, int prevstate, int event, int error )
{
    if (prevstate & FD_CONNECT)
    {
        /* a connect was in progress: report only its completion */
        sock->pmask |= FD_CONNECT;
        sock->hmask |= FD_CONNECT;
        sock->errors[FD_CONNECT_BIT] = error;
    }
    else if (prevstate & FD_WINE_LISTENING)
    {
        /* a listening socket: report only the incoming connection */
        sock->pmask |= FD_ACCEPT;
        sock->hmask |= FD_ACCEPT;
        sock->errors[FD_ACCEPT_BIT] = error;
    }
    else
    {
        /* normal data flow: accumulate every event that fired */
        if (event & POLLIN)
        {
            sock->pmask |= FD_READ;
            sock->hmask |= FD_READ;
            sock->errors[FD_READ_BIT] = 0;
        }
        if (event & POLLOUT)
        {
            sock->pmask |= FD_WRITE;
            sock->hmask |= FD_WRITE;
            sock->errors[FD_WRITE_BIT] = 0;
        }
        if (event & POLLPRI)
        {
            sock->pmask |= FD_OOB;
            sock->hmask |= FD_OOB;
            sock->errors[FD_OOB_BIT] = 0;
        }
        if (event & (POLLERR|POLLHUP))
        {
            sock->pmask |= FD_CLOSE;
            sock->hmask |= FD_CLOSE;
            sock->errors[FD_CLOSE_BIT] = error;
        }
    }
    sock_wake_up( sock );
}

388
/* callback from the main poll loop: process events reported on the socket fd */
static void sock_poll_event( struct fd *fd, int event )
{
    struct sock *sock = get_fd_user( fd );
    int hangup_seen = 0;
    int prevstate = sock->state;   /* state before this event, used for dispatch */
    int error = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %p select event: %x\n", sock, event);

    /* we may change event later, remove from loop here */
    if (event & (POLLERR|POLLHUP)) set_fd_events( sock->fd, -1 );

    if (sock->state & FD_CONNECT)
    {
        if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            event &= ~POLLOUT;
            error = sock_error( fd );
        }
        else if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
            sock->connect_time = current_time;
        }
    }
    else if (sock->state & FD_WINE_LISTENING)
    {
        /* listening */
        if (event & (POLLERR|POLLHUP))
            error = sock_error( fd );
    }
    else
    {
        /* normal data flow */
        if ( sock->type == SOCK_STREAM && ( event & POLLIN ) )
        {
            char dummy;
            int nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr  = recv( get_unix_fd( fd ), &dummy, 1, MSG_PEEK );
            if ( nr == 0 )
            {
                /* 0-byte read means the peer shut down: treat as hangup */
                hangup_seen = 1;
                event &= ~POLLIN;
            }
            else if ( nr < 0 )
            {
                event &= ~POLLIN;
                /* EAGAIN can happen if an async recv() falls between the server's poll()
                   call and the invocation of this routine */
                if ( errno != EAGAIN )
                {
                    error = errno;
                    event |= POLLERR;
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %p: %d\n", sock, errno );
                }
            }
        }

        /* an error or hangup on a still-open direction aborts it */
        if ( (hangup_seen || event & (POLLHUP|POLLERR)) && (sock->state & (FD_READ|FD_WRITE)) )
        {
            error = error ? error : sock_error( fd );
            if ( (event & POLLERR) || ( sock_shutdown_type == SOCK_SHUTDOWN_EOF && (event & POLLHUP) ))
                sock->state &= ~FD_WRITE;
            sock->state &= ~FD_READ;

            if (debug_level)
                fprintf(stderr, "socket %p aborted by error %d, event: %x\n", sock, error, event);
        }

        if (hangup_seen)
            event |= POLLHUP;
    }

    /* asyncs consume their events first; whatever is left goes to select */
    event = sock_dispatch_asyncs( sock, event, error );
    sock_dispatch_events( sock, prevstate, event, error );

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );

    sock_reselect( sock );
}

static void sock_dump( struct object *obj, int verbose )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
485
    fprintf( stderr, "Socket fd=%p, state=%x, mask=%x, pending=%x, held=%x\n",
486
            sock->fd, sock->state,
487
            sock->mask, sock->pmask, sock->hmask );
488 489
}

490
static int sock_signaled( struct object *obj, struct wait_queue_entry *entry )
491
{
492
    struct sock *sock = (struct sock *)obj;
493 494
    assert( obj->ops == &sock_ops );

495
    return check_fd_events( sock->fd, sock_get_poll_events( sock->fd ) ) != 0;
496 497
}

498
/* compute which poll() events the main loop should watch for this socket */
static int sock_get_poll_events( struct fd *fd )
{
    struct sock *sock = get_fd_user( fd );
    unsigned int mask = sock->mask & ~sock->hmask;   /* app-selected events not currently held */
    unsigned int smask = sock->state & mask;
    int ev = 0;

    assert( sock->obj.ops == &sock_ops );

    if (sock->state & FD_CONNECT)
        /* connecting, wait for writable */
        return POLLOUT;

    /* queued-but-not-waiting asyncs suppress polling for that direction */
    if ( async_queued( sock->read_q ) )
    {
        if ( async_waiting( sock->read_q ) ) ev |= POLLIN | POLLPRI;
    }
    else if (smask & FD_READ || (sock->state & FD_WINE_LISTENING && mask & FD_ACCEPT))
        ev |= POLLIN | POLLPRI;
    /* We use POLLIN with 0 bytes recv() as FD_CLOSE indication for stream sockets. */
    else if ( sock->type == SOCK_STREAM && sock->state & FD_READ && mask & FD_CLOSE &&
              !(sock->hmask & FD_READ) )
        ev |= POLLIN;

    if ( async_queued( sock->write_q ) )
    {
        if ( async_waiting( sock->write_q ) ) ev |= POLLOUT;
    }
    else if (smask & FD_WRITE)
        ev |= POLLOUT;

    return ev;
}

532
/* all fds managed here are sockets */
static enum server_fd_type sock_get_fd_type( struct fd *fd )
{
    return FD_TYPE_SOCKET;
}

537
/* handle socket-specific ioctls; returns a wait handle for blocking calls.
 * NOTE: defined static to match the forward declaration above (the definition
 * previously omitted the storage class). */
static obj_handle_t sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
{
    struct sock *sock = get_fd_user( fd );
    obj_handle_t wait_handle = 0;
    struct async_queue *ifchange_q;

    assert( sock->obj.ops == &sock_ops );

    switch(code)
    {
    case WS_SIO_ADDRESS_LIST_CHANGE:
        /* a nonblocking socket cannot block waiting for the notification */
        if ((sock->state & FD_WINE_NONBLOCKING) && async_is_blocking( async ))
        {
            set_error( STATUS_CANT_WAIT );
            return 0;
        }
        if (!(ifchange_q = sock_get_ifchange_q( sock ))) return 0;
        queue_async( ifchange_q, async );
        if (async_is_blocking( async )) wait_handle = alloc_handle( current->process, async, SYNCHRONIZE, 0 );
        set_error( STATUS_PENDING );
        return wait_handle;
    default:
        set_error( STATUS_NOT_SUPPORTED );
        return 0;
    }
}

564
/* queue an async read or write on the socket */
static void sock_queue_async( struct fd *fd, struct async *async, int type, int count )
{
    struct sock *sock = get_fd_user( fd );
    struct async_queue *queue;

    assert( sock->obj.ops == &sock_ops );

    /* pick (lazily creating) the queue matching the async direction */
    switch (type)
    {
    case ASYNC_TYPE_READ:
        if (!sock->read_q && !(sock->read_q = create_async_queue( sock->fd ))) return;
        queue = sock->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        if (!sock->write_q && !(sock->write_q = create_async_queue( sock->fd ))) return;
        queue = sock->write_q;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* refuse if that direction is shut down and no connect/listen is pending */
    if ( ( !( sock->state & (FD_READ|FD_CONNECT|FD_WINE_LISTENING) ) && type == ASYNC_TYPE_READ  ) ||
         ( !( sock->state & (FD_WRITE|FD_CONNECT) ) && type == ASYNC_TYPE_WRITE ) )
    {
        set_error( STATUS_PIPE_DISCONNECTED );
        return;
    }

    queue_async( queue, async );
    sock_reselect( sock );

    set_error( STATUS_PENDING );
}

599 600 601
static void sock_reselect_async( struct fd *fd, struct async_queue *queue )
{
    struct sock *sock = get_fd_user( fd );
602 603 604
    /* ignore reselect on ifchange queue */
    if (sock->ifchange_q != queue)
        sock_reselect( sock );
605 606
}

607 608 609 610 611 612
/* hand out a new reference to the socket's fd */
static struct fd *sock_get_fd( struct object *obj )
{
    struct sock *s = (struct sock *)obj;

    return (struct fd *)grab_object( s->fd );
}

613 614 615 616 617 618
/* release everything owned by a socket object when its last handle goes away */
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    free_async_queue( sock->read_q );
    free_async_queue( sock->write_q );
    /* abort pending interface-change waits before tearing down their queue */
    async_wake_up( sock->ifchange_q, STATUS_CANCELLED );
    sock_destroy_ifchange_q( sock );
    if (sock->event) release_object( sock->event );
    if (sock->fd)
    {
        /* shut the socket down to force pending poll() calls in the client to return */
        shutdown( get_unix_fd(sock->fd), SHUT_RDWR );
        release_object( sock->fd );
    }
}

636 637 638 639 640 641 642 643 644 645 646 647 648 649
/* reset every field of a freshly allocated socket object to a known state */
static void init_sock(struct sock *sock)
{
    sock->state   = 0;
    sock->mask    = 0;
    sock->hmask   = 0;
    sock->pmask   = 0;
    sock->polling = 0;
    sock->flags   = 0;
    /* proto was previously the only field left uninitialized here; callers
     * that set it afterwards are unaffected, but initialize it defensively */
    sock->proto   = 0;
    sock->type    = 0;
    sock->family  = 0;
    sock->event   = NULL;
    sock->window  = 0;
    sock->message = 0;
    sock->wparam  = 0;
    sock->connect_time = 0;
    sock->deferred = NULL;
    sock->read_q  = NULL;
    sock->write_q = NULL;
    sock->ifchange_q = NULL;
    sock->ifchange_obj = NULL;
    memset( sock->errors, 0, sizeof(sock->errors) );
}

659
/* create a new and unconnected socket */
static struct object *create_socket( int family, int type, int protocol, unsigned int flags )
{
    struct sock *sock;
    int sockfd;

    sockfd = socket( family, type, protocol );
    if (debug_level)
        fprintf(stderr,"socket(%d,%d,%d)=%d\n",family,type,protocol,sockfd);
    if (sockfd == -1)
    {
        sock_set_error();
        return NULL;
    }
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    if (!(sock = alloc_object( &sock_ops )))
    {
        close( sockfd );
        return NULL;
    }
    init_sock( sock );
    /* datagram sockets are read/write-ready at once; stream sockets must connect first */
    sock->state  = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
    sock->flags  = flags;
    sock->proto  = protocol;
    sock->type   = type;
    sock->family = family;

    if (!(sock->fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj,
                            (flags & WSA_FLAG_OVERLAPPED) ? 0 : FILE_SYNCHRONOUS_IO_NONALERT )))
    {
        /* create_anonymous_fd took ownership of sockfd; releasing the object cleans up */
        release_object( sock );
        return NULL;
    }
    sock_reselect( sock );
    clear_error();
    return &sock->obj;
}

697 698 699 700 701 702 703 704 705 706
/* accepts a socket and inits it */
static int accept_new_fd( struct sock *sock )
{

    /* Try to accept(2). We can't be safe that this an already connected socket
     * or that accept() is allowed on it. In those cases we will get -1/errno
     * return.
     */
    int acceptfd;
    struct sockaddr saddr;
707
    socklen_t slen = sizeof(saddr);
708 709 710 711 712 713 714 715 716 717 718
    acceptfd = accept( get_unix_fd(sock->fd), &saddr, &slen);
    if (acceptfd == -1)
    {
        sock_set_error();
        return acceptfd;
    }

    fcntl(acceptfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    return acceptfd;
}

719
/* accept a socket (creates a new fd) */
static struct sock *accept_socket( obj_handle_t handle )
{
    struct sock *acceptsock;
    struct sock *sock;
    int	acceptfd;

    sock = (struct sock *)get_handle_obj( current->process, handle, FILE_READ_DATA, &sock_ops );
    if (!sock)
        return NULL;

    /* a previously deferred accept takes priority over calling accept() again */
    if ( sock->deferred )
    {
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
        {
            release_object( sock );
            return NULL;
        }
        if (!(acceptsock = alloc_object( &sock_ops )))
        {
            close( acceptfd );
            release_object( sock );
            return NULL;
        }

        init_sock( acceptsock );
        /* newly created socket gets the same properties of the listening socket */
        acceptsock->state  = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
        if (sock->state & FD_WINE_NONBLOCKING)
            acceptsock->state |= FD_WINE_NONBLOCKING;
        acceptsock->mask    = sock->mask;
        acceptsock->proto   = sock->proto;
        acceptsock->type    = sock->type;
        acceptsock->family  = sock->family;
        acceptsock->window  = sock->window;
        acceptsock->message = sock->message;
        acceptsock->connect_time = current_time;
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags = sock->flags;
        if (!(acceptsock->fd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                                    get_fd_options( sock->fd ) )))
        {
            release_object( acceptsock );
            release_object( sock );
            return NULL;
        }
    }
    /* the accept consumed the pending FD_ACCEPT notification */
    clear_error();
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return acceptsock;
}

779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808
/* accept a connection from 'sock' directly into the existing socket object
 * 'acceptsock' (AcceptEx-style); returns TRUE on success */
static int accept_into_socket( struct sock *sock, struct sock *acceptsock )
{
    int acceptfd;
    struct fd *newfd;
    if ( sock->deferred )
    {
        /* reuse the fd of a previously deferred accept */
        newfd = dup_fd_object( sock->deferred->fd, 0, 0,
                               get_fd_options( acceptsock->fd ) );
        if ( !newfd )
            return FALSE;

        set_fd_user( newfd, &sock_fd_ops, &acceptsock->obj );

        release_object( sock->deferred );
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
            return FALSE;

        if (!(newfd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                            get_fd_options( acceptsock->fd ) )))
            return FALSE;
    }

    /* reset the target socket's state for its new connection */
    acceptsock->state  |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
    acceptsock->hmask   = 0;
    acceptsock->pmask   = 0;
    acceptsock->polling = 0;
    acceptsock->proto   = sock->proto;
    acceptsock->type    = sock->type;
    acceptsock->family  = sock->family;
    acceptsock->wparam  = 0;
    acceptsock->deferred = NULL;
    acceptsock->connect_time = current_time;
    /* keep the completion port association while swapping in the new fd */
    fd_copy_completion( acceptsock->fd, newfd );
    release_object( acceptsock->fd );
    acceptsock->fd = newfd;

    clear_error();
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );

    return TRUE;
}

Bruno Jesus's avatar
Bruno Jesus committed
827
/* return an errno value mapped to a WSA error */
static int sock_get_error( int err )
{
    switch (err)
    {
        case EINTR:             return WSAEINTR;
        case EBADF:             return WSAEBADF;
        case EPERM:
        case EACCES:            return WSAEACCES;
        case EFAULT:            return WSAEFAULT;
        case EINVAL:            return WSAEINVAL;
        case EMFILE:            return WSAEMFILE;
        case EWOULDBLOCK:       return WSAEWOULDBLOCK;
        case EINPROGRESS:       return WSAEINPROGRESS;
        case EALREADY:          return WSAEALREADY;
        case ENOTSOCK:          return WSAENOTSOCK;
        case EDESTADDRREQ:      return WSAEDESTADDRREQ;
        case EMSGSIZE:          return WSAEMSGSIZE;
        case EPROTOTYPE:        return WSAEPROTOTYPE;
        case ENOPROTOOPT:       return WSAENOPROTOOPT;
        case EPROTONOSUPPORT:   return WSAEPROTONOSUPPORT;
        case ESOCKTNOSUPPORT:   return WSAESOCKTNOSUPPORT;
        case EOPNOTSUPP:        return WSAEOPNOTSUPP;
        case EPFNOSUPPORT:      return WSAEPFNOSUPPORT;
        case EAFNOSUPPORT:      return WSAEAFNOSUPPORT;
        case EADDRINUSE:        return WSAEADDRINUSE;
        case EADDRNOTAVAIL:     return WSAEADDRNOTAVAIL;
        case ENETDOWN:          return WSAENETDOWN;
        case ENETUNREACH:       return WSAENETUNREACH;
        case ENETRESET:         return WSAENETRESET;
        case ECONNABORTED:      return WSAECONNABORTED;
        case EPIPE:
        case ECONNRESET:        return WSAECONNRESET;
        case ENOBUFS:           return WSAENOBUFS;
        case EISCONN:           return WSAEISCONN;
        case ENOTCONN:          return WSAENOTCONN;
        case ESHUTDOWN:         return WSAESHUTDOWN;
        case ETOOMANYREFS:      return WSAETOOMANYREFS;
        case ETIMEDOUT:         return WSAETIMEDOUT;
        case ECONNREFUSED:      return WSAECONNREFUSED;
        case ELOOP:             return WSAELOOP;
        case ENAMETOOLONG:      return WSAENAMETOOLONG;
        case EHOSTDOWN:         return WSAEHOSTDOWN;
        case EHOSTUNREACH:      return WSAEHOSTUNREACH;
        case ENOTEMPTY:         return WSAENOTEMPTY;
/* the following errno values are not defined on every platform */
#ifdef EPROCLIM
        case EPROCLIM:          return WSAEPROCLIM;
#endif
#ifdef EUSERS
        case EUSERS:            return WSAEUSERS;
#endif
#ifdef EDQUOT
        case EDQUOT:            return WSAEDQUOT;
#endif
#ifdef ESTALE
        case ESTALE:            return WSAESTALE;
#endif
#ifdef EREMOTE
        case EREMOTE:           return WSAEREMOTE;
#endif

        case 0:                 return 0;
        default:
            /* unknown errno: log it so the mapping table can be extended */
            errno = err;
            perror("wineserver: sock_get_error() can't map error");
            return WSAEFAULT;
    }
}

896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927
/* map a unix errno value onto the NTSTATUS code reported to clients */
static int sock_get_ntstatus( int err )
{
    switch ( err )
    {
        /* bad arguments */
        case EINVAL:
        case EDESTADDRREQ:
        case ENOPROTOOPT:
        case EADDRNOTAVAIL:     return STATUS_INVALID_PARAMETER;

        /* operations the protocol stack does not support */
        case EPROTONOSUPPORT:
        case ESOCKTNOSUPPORT:
        case EPFNOSUPPORT:
        case EAFNOSUPPORT:
        case EPROTOTYPE:
        case EOPNOTSUPP:        return STATUS_NOT_SUPPORTED;

        /* handle and permission problems */
        case EBADF:             return STATUS_INVALID_HANDLE;
        case EPERM:
        case EACCES:            return STATUS_ACCESS_DENIED;
        case ENOTSOCK:          return STATUS_OBJECT_TYPE_MISMATCH;

        /* resource exhaustion */
        case EFAULT:            return STATUS_NO_MEMORY;
        case ENFILE:
        case EMFILE:            return STATUS_TOO_MANY_OPENED_FILES;
        case EBUSY:             return STATUS_DEVICE_BUSY;

        /* transient / in-progress conditions */
        case EWOULDBLOCK:       return STATUS_CANT_WAIT;
        case EINPROGRESS:       return STATUS_PENDING;
        case EALREADY:
        case ENETDOWN:          return STATUS_NETWORK_BUSY;

        /* connection-level failures */
        case EMSGSIZE:          return STATUS_BUFFER_OVERFLOW;
        case EADDRINUSE:        return STATUS_ADDRESS_ALREADY_ASSOCIATED;
        case ECONNREFUSED:      return STATUS_CONNECTION_REFUSED;
        case ESHUTDOWN:         return STATUS_PIPE_DISCONNECTED;
        case ENOTCONN:          return STATUS_CONNECTION_DISCONNECTED;
        case ETIMEDOUT:         return STATUS_IO_TIMEOUT;
        case ENETUNREACH:       return STATUS_NETWORK_UNREACHABLE;
        case EHOSTUNREACH:      return STATUS_HOST_UNREACHABLE;
        case EPIPE:
        case ECONNRESET:        return STATUS_CONNECTION_RESET;
        case ECONNABORTED:      return STATUS_CONNECTION_ABORTED;

        case 0:                 return STATUS_SUCCESS;
        default:
            /* an unmapped error is a server bug worth reporting loudly */
            errno = err;
            perror("wineserver: sock_get_ntstatus() can't map error");
            return STATUS_UNSUCCESSFUL;
    }
}

942 943 944
/* set the request's last error from the current unix errno value */
static void sock_set_error(void)
{
    int status = sock_get_ntstatus( errno );
    set_error( status );
}

948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981
#ifdef HAVE_LINUX_RTNETLINK_H

/* only keep one ifchange object around, all sockets waiting for wakeups will look to it */
static struct object *ifchange_object;

static void ifchange_dump( struct object *obj, int verbose );
static struct fd *ifchange_get_fd( struct object *obj );
static void ifchange_destroy( struct object *obj );

static int ifchange_get_poll_events( struct fd *fd );
static void ifchange_poll_event( struct fd *fd, int event );

/* singleton server object wrapping the netlink socket used for
 * interface (address) change notifications */
struct ifchange
{
    struct object       obj;     /* object header */
    struct fd          *fd;      /* interface change file descriptor */
    struct list         sockets; /* list of sockets to send interface change notifications */
};

/* object operations for the ifchange singleton; mostly defaults, since the
 * object is only ever used internally through its fd */
static const struct object_ops ifchange_ops =
{
    sizeof(struct ifchange), /* size */
    ifchange_dump,           /* dump */
    no_get_type,             /* get_type */
    add_queue,               /* add_queue */
    NULL,                    /* remove_queue */
    NULL,                    /* signaled */
    no_satisfied,            /* satisfied */
    no_signal,               /* signal */
    ifchange_get_fd,         /* get_fd */
    default_fd_map_access,   /* map_access */
    default_get_sd,          /* get_sd */
    default_set_sd,          /* set_sd */
    no_lookup_name,          /* lookup_name */
    no_link_name,            /* link_name */
    NULL,                    /* unlink_name */
    no_open_file,            /* open_file */
    no_close_handle,         /* close_handle */
    ifchange_destroy         /* destroy */
};

/* fd operations for the ifchange netlink socket: read wakeups only, no I/O
 * is ever performed through the normal fd interface */
static const struct fd_ops ifchange_fd_ops =
{
    ifchange_get_poll_events, /* get_poll_events */
    ifchange_poll_event,      /* poll_event */
    NULL,                     /* get_fd_type */
    no_fd_read,               /* read */
    no_fd_write,              /* write */
    no_fd_flush,              /* flush */
    no_fd_ioctl,              /* ioctl */
    NULL,                     /* queue_async */
    NULL                      /* reselect_async */
};

/* print a debug description of the ifchange object */
static void ifchange_dump( struct object *obj, int verbose )
{
    assert( obj->ops == &ifchange_ops );
    fprintf( stderr, "Interface change\n" );
}

/* return a new reference to the ifchange object's fd */
static struct fd *ifchange_get_fd( struct object *obj )
{
    struct ifchange *ifchange = (struct ifchange *)obj;
    return (struct fd *)grab_object( ifchange->fd );
}

/* release the ifchange object's resources when its last reference goes away */
static void ifchange_destroy( struct object *obj )
{
    struct ifchange *ifchange = (struct ifchange *)obj;
    assert( obj->ops == &ifchange_ops );

    release_object( ifchange->fd );

    /* reset the global ifchange object so that it will be recreated if it is needed again */
    assert( obj == ifchange_object );
    ifchange_object = NULL;
}

/* we only ever read from the netlink socket, so poll for input only */
static int ifchange_get_poll_events( struct fd *fd )
{
    return POLLIN;
}

/* wake up all the sockets waiting for a change notification event */
static void ifchange_wake_up( struct object *obj, unsigned int status )
{
    struct ifchange *ifchange = (struct ifchange *)obj;
    struct list *cursor, *safe;

    assert( obj->ops == &ifchange_ops );
    assert( obj == ifchange_object );

    /* safe iteration: sock_destroy_ifchange_q() unlinks the entry we are on */
    LIST_FOR_EACH_SAFE( cursor, safe, &ifchange->sockets )
    {
        struct sock *sock = LIST_ENTRY( cursor, struct sock, ifchange_entry );

        assert( sock->ifchange_q );
        /* deliver the ifchange notification for this socket... */
        async_wake_up( sock->ifchange_q, status );
        /* ...then drop it from the list and release its ifchange reference */
        sock_destroy_ifchange_q( sock );
    }
}

/* read pending rtnetlink messages and wake waiting sockets when an
 * interface address was added or removed */
static void ifchange_poll_event( struct fd *fd, int event )
{
    struct object *ifchange = get_fd_user( fd );
    unsigned int status = STATUS_PENDING;
    char buffer[PIPE_BUF];
    int r;

    r = recv( get_unix_fd(fd), buffer, sizeof(buffer), MSG_DONTWAIT );
    if (r < 0)
    {
        /* on some platforms EAGAIN and EWOULDBLOCK are distinct values;
         * treat both as "no data yet" */
        if (errno == EWOULDBLOCK || (EWOULDBLOCK != EAGAIN && errno == EAGAIN))
            return;  /* retry when poll() says the socket is ready */
        status = sock_get_ntstatus( errno );
    }
    else if (r > 0)
    {
        struct nlmsghdr *nlh;

        /* walk the batch of netlink messages; an address add/delete
         * message completes the notification */
        for (nlh = (struct nlmsghdr *)buffer; NLMSG_OK(nlh, r); nlh = NLMSG_NEXT(nlh, r))
        {
            if (nlh->nlmsg_type == NLMSG_DONE)
                break;
            if (nlh->nlmsg_type == RTM_NEWADDR || nlh->nlmsg_type == RTM_DELADDR)
                status = STATUS_SUCCESS;
        }
    }
    else status = STATUS_CANCELLED; /* EOF on the netlink socket */

    if (status != STATUS_PENDING) ifchange_wake_up( ifchange, status );
}

#endif

/* we only need one of these interface notification objects, all of the sockets dependent upon
 * it will wake up when a notification event occurs */
 static struct object *get_ifchange( void )
 {
#ifdef HAVE_LINUX_RTNETLINK_H
    struct ifchange *ifchange;
    struct sockaddr_nl addr;
    int unix_fd;

    if (ifchange_object)
    {
        /* increment the refcount for each socket that uses the ifchange object */
        return grab_object( ifchange_object );
    }

    /* create the socket we need for processing interface change notifications */
    unix_fd = socket( PF_NETLINK, SOCK_RAW, NETLINK_ROUTE );
    if (unix_fd == -1)
    {
        sock_set_error();
        return NULL;
    }
    fcntl( unix_fd, F_SETFL, O_NONBLOCK ); /* make socket nonblocking */
    memset( &addr, 0, sizeof(addr) );
    addr.nl_family = AF_NETLINK;
    addr.nl_groups = RTMGRP_IPV4_IFADDR;
    /* bind the socket to the special netlink kernel interface */
    if (bind( unix_fd, (struct sockaddr *)&addr, sizeof(addr) ) == -1)
    {
        close( unix_fd );
        sock_set_error();
        return NULL;
    }
    if (!(ifchange = alloc_object( &ifchange_ops )))
    {
        close( unix_fd );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    list_init( &ifchange->sockets );
    if (!(ifchange->fd = create_anonymous_fd( &ifchange_fd_ops, unix_fd, &ifchange->obj, 0 )))
    {
        release_object( ifchange );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    set_fd_events( ifchange->fd, POLLIN ); /* enable read wakeup on the file descriptor */

    /* the ifchange object is now successfully configured */
    ifchange_object = &ifchange->obj;
    return &ifchange->obj;
#else
1134 1135
    set_error( STATUS_NOT_SUPPORTED );
    return NULL;
1136
#endif
1137 1138
}

1139
/* add the socket to the interface change notification list */
static void ifchange_add_sock( struct object *obj, struct sock *sock )
{
#ifdef HAVE_LINUX_RTNETLINK_H
    struct ifchange *ifchange = (struct ifchange *)obj;

    list_add_tail( &ifchange->sockets, &sock->ifchange_entry );
#endif
    /* without rtnetlink support this is a no-op */
}

/* create a new ifchange queue for a specific socket or, if one already exists, reuse the existing one */
static struct async_queue *sock_get_ifchange_q( struct sock *sock )
{
    struct object *ifchange;

    if (sock->ifchange_q) /* reuse existing ifchange_q for this socket */
        return sock->ifchange_q;

    /* grab (and possibly create) the global ifchange object; this takes a
     * reference that is released in sock_destroy_ifchange_q() */
    if (!(ifchange = get_ifchange()))
        return NULL;

    /* create the ifchange notification queue */
    sock->ifchange_q = create_async_queue( sock->fd );
    if (!sock->ifchange_q)
    {
        release_object( ifchange );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }

    /* add the socket to the ifchange notification list */
    ifchange_add_sock( ifchange, sock );
    sock->ifchange_obj = ifchange;
    return sock->ifchange_q;
}

/* destroy an existing ifchange queue for a specific socket */
static void sock_destroy_ifchange_q( struct sock *sock )
{
    if (!sock->ifchange_q) return;

    /* unlink from the ifchange notification list, tear down the async
     * queue, and drop the reference taken by sock_get_ifchange_q() */
    list_remove( &sock->ifchange_entry );
    free_async_queue( sock->ifchange_q );
    sock->ifchange_q = NULL;
    release_object( sock->ifchange_obj );
}

1187 1188 1189 1190 1191
/* create a socket */
DECL_HANDLER(create_socket)
{
    struct object *obj;

1192
    reply->handle = 0;
1193
    if ((obj = create_socket( req->family, req->type, req->protocol, req->flags )) != NULL)
1194
    {
1195
        reply->handle = alloc_handle( current->process, obj, req->access, req->attributes );
1196 1197 1198 1199 1200 1201 1202
        release_object( obj );
    }
}

/* accept a socket */
DECL_HANDLER(accept_socket)
{
1203
    struct sock *sock;
1204

1205
    reply->handle = 0;
1206
    if ((sock = accept_socket( req->lhandle )) != NULL)
1207
    {
1208
        reply->handle = alloc_handle( current->process, &sock->obj, req->access, req->attributes );
1209 1210 1211
        sock->wparam = reply->handle;  /* wparam for message is the socket handle */
        sock_reselect( sock );
        release_object( &sock->obj );
1212 1213 1214
    }
}

1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240
/* accept a socket into an initialized socket */
DECL_HANDLER(accept_into_socket)
{
    const int all_attributes = FILE_READ_ATTRIBUTES|FILE_WRITE_ATTRIBUTES|FILE_READ_DATA;
    struct sock *listener, *acceptor;

    listener = (struct sock *)get_handle_obj( current->process, req->lhandle,
                                              all_attributes, &sock_ops );
    if (!listener) return;

    acceptor = (struct sock *)get_handle_obj( current->process, req->ahandle,
                                              all_attributes, &sock_ops );
    if (!acceptor)
    {
        release_object( listener );
        return;
    }

    if (accept_into_socket( listener, acceptor ))
    {
        acceptor->wparam = req->ahandle;  /* wparam for message is the socket handle */
        sock_reselect( acceptor );
    }
    release_object( acceptor );
    release_object( listener );
}

1241 1242 1243 1244
/* set socket event parameters */
DECL_HANDLER(set_socket_event)
{
    struct sock *sock;
    struct event *old_event;

    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                FILE_WRITE_ATTRIBUTES, &sock_ops))) return;
    /* remember the previous event so it can be released after reselect/wakeup */
    old_event = sock->event;
    sock->mask    = req->mask;
    sock->hmask   &= ~req->mask; /* re-enable held events */
    sock->event   = NULL;
    sock->window  = req->window;
    sock->message = req->msg;
    sock->wparam  = req->handle;  /* wparam is the socket handle */
    /* NOTE(review): if get_event_obj() fails here the handler continues with
     * a NULL event and the error already set -- confirm this is intended */
    if (req->event) sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );

    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);

    sock_reselect( sock );

    /* selecting events marks the socket nonblocking */
    sock->state |= FD_WINE_NONBLOCKING;

    /* if a network event is pending, signal the event object
       it is possible that FD_CONNECT or FD_ACCEPT network events has happened
       before a WSAEventSelect() was done on it.
       (when dealing with Asynchronous socket)  */
    sock_wake_up( sock );

    if (old_event) release_object( old_event ); /* we're through with it */
    release_object( &sock->obj );
}

/* get socket event parameters */
DECL_HANDLER(get_socket_event)
{
    struct sock *sock;
    int i;
    int errors[FD_MAX_EVENTS];

    sock = (struct sock *)get_handle_obj( current->process, req->handle, FILE_READ_ATTRIBUTES, &sock_ops );
    if (!sock)
    {
        /* return an empty reply on a bad handle */
        reply->mask  = 0;
        reply->pmask = 0;
        reply->state = 0;
        return;
    }
    reply->mask  = sock->mask;
    reply->pmask = sock->pmask;
    reply->state = sock->state;
    /* translate the stored per-event unix errors to NTSTATUS for the client */
    for (i = 0; i < FD_MAX_EVENTS; i++)
        errors[i] = sock_get_ntstatus(sock->errors[i]);

    set_reply_data( errors, min( get_reply_max_size(), sizeof(errors) ));

    if (req->service)
    {
        /* reset the client-supplied event, if any */
        if (req->c_event)
        {
            struct event *cevent = get_event_obj( current->process, req->c_event,
                                                  EVENT_MODIFY_STATE );
            if (cevent)
            {
                reset_event( cevent );
                release_object( cevent );
            }
        }
        /* clear the pending events and rearm the socket */
        sock->pmask = 0;
        sock_reselect( sock );
    }
    release_object( &sock->obj );
}

/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
{
    struct sock *sock;

    if (!(sock = (struct sock*)get_handle_obj( current->process, req->handle,
                                               FILE_WRITE_ATTRIBUTES, &sock_ops)))
        return;

    /* for event-based notification, windows erases stale events */
    sock->pmask &= ~req->mask;

    /* clear the held-event bits and apply the requested state changes */
    sock->hmask &= ~req->mask;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;
    /* stream-only state flags make no sense on datagram sockets */
    if ( sock->type != SOCK_STREAM ) sock->state &= ~STREAM_FLAG_MASK;

    sock_reselect( sock );

    release_object( &sock->obj );
}
1336 1337 1338 1339 1340

DECL_HANDLER(set_socket_deferred)
{
    struct sock *sock, *acceptsock;

1341
    sock=(struct sock *)get_handle_obj( current->process, req->handle, FILE_WRITE_ATTRIBUTES, &sock_ops );
1342 1343
    if ( !sock )
        return;
1344

1345
    acceptsock = (struct sock *)get_handle_obj( current->process, req->deferred, 0, &sock_ops );
1346 1347
    if ( !acceptsock )
    {
1348
        release_object( sock );
1349 1350 1351
        return;
    }
    sock->deferred = acceptsock;
1352
    release_object( sock );
1353
}
1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367

/* return the family, type and protocol of a socket */
DECL_HANDLER(get_socket_info)
{
    struct sock *sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                       FILE_READ_ATTRIBUTES, &sock_ops );

    if (!sock) return;

    reply->family   = sock->family;
    reply->type     = sock->type;
    reply->protocol = sock->proto;

    release_object( &sock->obj );
}