blob: 09318d4ea5534f470664e4c8a67ad270bdd9ad02 [file] [log] [blame]
Benny Prijono4766ffe2005-11-01 17:56:59 +00001/* $Id$
Benny Prijonodd859a62005-11-01 16:42:51 +00002 */
Benny Prijonodd859a62005-11-01 16:42:51 +00003/*
4 * sock_select.c
5 *
6 * This is the implementation of IOQueue using pj_sock_select().
7 * It runs anywhere where pj_sock_select() is available (currently
8 * Win32, Linux, Linux kernel, etc.).
9 */
10
11#include <pj/ioqueue.h>
12#include <pj/os.h>
13#include <pj/lock.h>
14#include <pj/log.h>
15#include <pj/list.h>
16#include <pj/pool.h>
17#include <pj/string.h>
18#include <pj/assert.h>
19#include <pj/sock.h>
20#include <pj/compat/socket.h>
21#include <pj/sock_select.h>
22#include <pj/errno.h>
Benny Prijonobc986152005-11-06 16:50:38 +000023
24/*
25 * Include declaration from common abstraction.
26 */
27#include "ioqueue_common_abs.h"
Benny Prijonodd859a62005-11-01 16:42:51 +000028
29/*
30 * ISSUES with ioqueue_select()
31 *
32 * EAGAIN/EWOULDBLOCK error in recv():
33 * - when multiple threads are working with the ioqueue, application
34 * may receive EAGAIN or EWOULDBLOCK in the receive callback.
35 * This error happens because more than one thread is watching for
36 * the same descriptor set, so when all of them call recv() or recvfrom()
37 * simultaneously, only one will succeed and the rest will get the error.
38 *
39 */
40#define THIS_FILE "ioq_select"
Benny Prijonobc986152005-11-06 16:50:38 +000041
42/*
43 * The select ioqueue relies on socket functions (pj_sock_xxx()) to return
44 * the correct error code.
45 */
46#if PJ_RETURN_OS_ERROR(100) != PJ_STATUS_FROM_OS(100)
47# error "Error reporting must be enabled for this function to work!"
48#endif
49
50/**
51 * Get the number of descriptors in the set. This is defined in sock_select.c
52 * This function will only return the number of sockets set from PJ_FD_SET
53 * operation. When the set is modified by other means (such as by select()),
54 * the count will not be reflected here.
55 *
56 * That's why don't export this function in the header file, to avoid
57 * misunderstanding.
58 *
59 * @param fdsetp The descriptor set.
60 *
61 * @return Number of descriptors in the set.
62 */
63PJ_DECL(pj_size_t) PJ_FD_COUNT(const pj_fd_set_t *fdsetp);
64
Benny Prijonodd859a62005-11-01 16:42:51 +000065
/*
 * During debugging builds, VALIDATE_FD_SET may be enabled to
 * cross-check the fd_sets against each key's pending operations.
 * It is currently forced off (see validate_sets() for why the
 * check no longer works with the per-key locking scheme).
 */
/*
#if defined(PJ_DEBUG) && PJ_DEBUG != 0
#  define VALIDATE_FD_SET		1
#else
#  define VALIDATE_FD_SET		0
#endif
*/
#define VALIDATE_FD_SET     0
Benny Prijonobc986152005-11-06 16:50:38 +000078
Benny Prijonodd859a62005-11-01 16:42:51 +000079/*
80 * This describes each key.
81 */
82struct pj_ioqueue_key_t
Benny Prijonobc986152005-11-06 16:50:38 +000083{
Benny Prijono4d974f32005-11-06 13:32:11 +000084 DECLARE_COMMON_KEY
Benny Prijonodd859a62005-11-01 16:42:51 +000085};
86
87/*
88 * This describes the I/O queue itself.
89 */
90struct pj_ioqueue_t
91{
Benny Prijonobc986152005-11-06 16:50:38 +000092 DECLARE_COMMON_IOQUEUE
Benny Prijono4d974f32005-11-06 13:32:11 +000093
Benny Prijonodd859a62005-11-01 16:42:51 +000094 unsigned max, count;
Benny Prijonoa9946d52005-11-06 09:37:47 +000095 pj_ioqueue_key_t key_list;
Benny Prijonodd859a62005-11-01 16:42:51 +000096 pj_fd_set_t rfdset;
97 pj_fd_set_t wfdset;
98#if PJ_HAS_TCP
99 pj_fd_set_t xfdset;
100#endif
101};
Benny Prijonobc986152005-11-06 16:50:38 +0000102
103/* Include implementation for common abstraction after we declare
104 * pj_ioqueue_key_t and pj_ioqueue_t.
105 */
106#include "ioqueue_common_abs.c"
Benny Prijonodd859a62005-11-01 16:42:51 +0000107
108/*
Benny Prijono40ce3fb2005-11-07 18:14:08 +0000109 * pj_ioqueue_name()
110 */
111PJ_DEF(const char*) pj_ioqueue_name(void)
112{
113 return "select";
114}
115
116/*
Benny Prijonodd859a62005-11-01 16:42:51 +0000117 * pj_ioqueue_create()
118 *
119 * Create select ioqueue.
120 */
121PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
122 pj_size_t max_fd,
Benny Prijonodd859a62005-11-01 16:42:51 +0000123 pj_ioqueue_t **p_ioqueue)
124{
Benny Prijonobc986152005-11-06 16:50:38 +0000125 pj_ioqueue_t *ioqueue;
Benny Prijono4d974f32005-11-06 13:32:11 +0000126 pj_lock_t *lock;
Benny Prijonodd859a62005-11-01 16:42:51 +0000127 pj_status_t rc;
Benny Prijonobc986152005-11-06 16:50:38 +0000128
129 /* Check that arguments are valid. */
130 PJ_ASSERT_RETURN(pool != NULL && p_ioqueue != NULL &&
131 max_fd > 0 && max_fd <= PJ_IOQUEUE_MAX_HANDLES,
132 PJ_EINVAL);
133
134 /* Check that size of pj_ioqueue_op_key_t is sufficient */
135 PJ_ASSERT_RETURN(sizeof(pj_ioqueue_op_key_t)-sizeof(void*) >=
136 sizeof(union operation_key), PJ_EBUG);
137
138 ioqueue = pj_pool_alloc(pool, sizeof(pj_ioqueue_t));
139
140 ioqueue_init(ioqueue);
Benny Prijono4d974f32005-11-06 13:32:11 +0000141
Benny Prijonoa9946d52005-11-06 09:37:47 +0000142 ioqueue->max = max_fd;
143 ioqueue->count = 0;
144 PJ_FD_ZERO(&ioqueue->rfdset);
145 PJ_FD_ZERO(&ioqueue->wfdset);
Benny Prijonodd859a62005-11-01 16:42:51 +0000146#if PJ_HAS_TCP
Benny Prijonoa9946d52005-11-06 09:37:47 +0000147 PJ_FD_ZERO(&ioqueue->xfdset);
Benny Prijonodd859a62005-11-01 16:42:51 +0000148#endif
Benny Prijonoa9946d52005-11-06 09:37:47 +0000149 pj_list_init(&ioqueue->key_list);
Benny Prijonodd859a62005-11-01 16:42:51 +0000150
Benny Prijono4d974f32005-11-06 13:32:11 +0000151 rc = pj_lock_create_simple_mutex(pool, "ioq%p", &lock);
Benny Prijonodd859a62005-11-01 16:42:51 +0000152 if (rc != PJ_SUCCESS)
153 return rc;
154
Benny Prijonobc986152005-11-06 16:50:38 +0000155 rc = pj_ioqueue_set_lock(ioqueue, lock, PJ_TRUE);
156 if (rc != PJ_SUCCESS)
Benny Prijono4d974f32005-11-06 13:32:11 +0000157 return rc;
Benny Prijonodd859a62005-11-01 16:42:51 +0000158
Benny Prijonoa9946d52005-11-06 09:37:47 +0000159 PJ_LOG(4, ("pjlib", "select() I/O Queue created (%p)", ioqueue));
Benny Prijonodd859a62005-11-01 16:42:51 +0000160
Benny Prijonoa9946d52005-11-06 09:37:47 +0000161 *p_ioqueue = ioqueue;
Benny Prijonodd859a62005-11-01 16:42:51 +0000162 return PJ_SUCCESS;
163}
164
165/*
166 * pj_ioqueue_destroy()
167 *
168 * Destroy ioqueue.
169 */
Benny Prijonoa9946d52005-11-06 09:37:47 +0000170PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioqueue)
Benny Prijonodd859a62005-11-01 16:42:51 +0000171{
Benny Prijonoa9946d52005-11-06 09:37:47 +0000172 PJ_ASSERT_RETURN(ioqueue, PJ_EINVAL);
Benny Prijonobc986152005-11-06 16:50:38 +0000173
174 pj_lock_acquire(ioqueue->lock);
Benny Prijono4d974f32005-11-06 13:32:11 +0000175 return ioqueue_destroy(ioqueue);
Benny Prijonodd859a62005-11-01 16:42:51 +0000176}
177
178
179/*
Benny Prijonodd859a62005-11-01 16:42:51 +0000180 * pj_ioqueue_register_sock()
181 *
182 * Register a handle to ioqueue.
183 */
184PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
Benny Prijonoa9946d52005-11-06 09:37:47 +0000185 pj_ioqueue_t *ioqueue,
Benny Prijonodd859a62005-11-01 16:42:51 +0000186 pj_sock_t sock,
187 void *user_data,
188 const pj_ioqueue_callback *cb,
189 pj_ioqueue_key_t **p_key)
190{
191 pj_ioqueue_key_t *key = NULL;
192 pj_uint32_t value;
193 pj_status_t rc = PJ_SUCCESS;
194
Benny Prijonoa9946d52005-11-06 09:37:47 +0000195 PJ_ASSERT_RETURN(pool && ioqueue && sock != PJ_INVALID_SOCKET &&
Benny Prijonodd859a62005-11-01 16:42:51 +0000196 cb && p_key, PJ_EINVAL);
197
Benny Prijonoa9946d52005-11-06 09:37:47 +0000198 pj_lock_acquire(ioqueue->lock);
Benny Prijonodd859a62005-11-01 16:42:51 +0000199
Benny Prijonoa9946d52005-11-06 09:37:47 +0000200 if (ioqueue->count >= ioqueue->max) {
Benny Prijonodd859a62005-11-01 16:42:51 +0000201 rc = PJ_ETOOMANY;
202 goto on_return;
203 }
204
205 /* Set socket to nonblocking. */
206 value = 1;
207#ifdef PJ_WIN32
Benny Prijonoa9946d52005-11-06 09:37:47 +0000208 if (ioctlsocket(sock, FIONBIO, (u_long*)&value)) {
Benny Prijonodd859a62005-11-01 16:42:51 +0000209#else
210 if (ioctl(sock, FIONBIO, &value)) {
211#endif
212 rc = pj_get_netos_error();
213 goto on_return;
214 }
215
216 /* Create key. */
Benny Prijonobc986152005-11-06 16:50:38 +0000217 key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
218 rc = ioqueue_init_key(pool, ioqueue, key, sock, user_data, cb);
219 if (rc != PJ_SUCCESS) {
220 key = NULL;
221 goto on_return;
222 }
Benny Prijonodd859a62005-11-01 16:42:51 +0000223
224 /* Register */
Benny Prijonoa9946d52005-11-06 09:37:47 +0000225 pj_list_insert_before(&ioqueue->key_list, key);
226 ++ioqueue->count;
Benny Prijonodd859a62005-11-01 16:42:51 +0000227
Benny Prijonobc986152005-11-06 16:50:38 +0000228on_return:
Benny Prijonoa9946d52005-11-06 09:37:47 +0000229 /* On error, socket may be left in non-blocking mode. */
Benny Prijonodd859a62005-11-01 16:42:51 +0000230 *p_key = key;
Benny Prijonoa9946d52005-11-06 09:37:47 +0000231 pj_lock_release(ioqueue->lock);
Benny Prijonodd859a62005-11-01 16:42:51 +0000232
233 return rc;
234}
235
236/*
237 * pj_ioqueue_unregister()
238 *
239 * Unregister handle from ioqueue.
240 */
Benny Prijonoa9946d52005-11-06 09:37:47 +0000241PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
Benny Prijonobc986152005-11-06 16:50:38 +0000242{
243 pj_ioqueue_t *ioqueue;
Benny Prijonodd859a62005-11-01 16:42:51 +0000244
Benny Prijonoa9946d52005-11-06 09:37:47 +0000245 PJ_ASSERT_RETURN(key, PJ_EINVAL);
Benny Prijonobc986152005-11-06 16:50:38 +0000246
247 ioqueue = key->ioqueue;
248
Benny Prijonoa9946d52005-11-06 09:37:47 +0000249 pj_lock_acquire(ioqueue->lock);
250
251 pj_assert(ioqueue->count > 0);
252 --ioqueue->count;
Benny Prijonodd859a62005-11-01 16:42:51 +0000253 pj_list_erase(key);
Benny Prijonoa9946d52005-11-06 09:37:47 +0000254 PJ_FD_CLR(key->fd, &ioqueue->rfdset);
255 PJ_FD_CLR(key->fd, &ioqueue->wfdset);
Benny Prijonodd859a62005-11-01 16:42:51 +0000256#if PJ_HAS_TCP
Benny Prijonoa9946d52005-11-06 09:37:47 +0000257 PJ_FD_CLR(key->fd, &ioqueue->xfdset);
Benny Prijonodd859a62005-11-01 16:42:51 +0000258#endif
Benny Prijonobc986152005-11-06 16:50:38 +0000259
260 /* ioqueue_destroy may try to acquire key's mutex.
261 * Since normally the order of locking is to lock key's mutex first
262 * then ioqueue's mutex, ioqueue_destroy may deadlock unless we
263 * release ioqueue's mutex first.
264 */
265 pj_lock_release(ioqueue->lock);
266
267 /* Destroy the key. */
268 ioqueue_destroy_key(key);
Benny Prijono4d974f32005-11-06 13:32:11 +0000269
Benny Prijonodd859a62005-11-01 16:42:51 +0000270 return PJ_SUCCESS;
271}
272
Benny Prijonobc986152005-11-06 16:50:38 +0000273
/* Debug helper: check that the fd_set contents are consistent with the
 * pending operations recorded in each key.  Compiled only when
 * VALIDATE_FD_SET is enabled.
 */
#if VALIDATE_FD_SET
static void validate_sets(const pj_ioqueue_t *ioqueue,
			  const pj_fd_set_t *rfdset,
			  const pj_fd_set_t *wfdset,
			  const pj_fd_set_t *xfdset)
{
    pj_ioqueue_key_t *key;

    /*
     * This basically would not work anymore: each key must be locked
     * before it is inspected, but we are already holding the ioqueue
     * mutex here, and acquiring the key's mutex in that order would
     * deadlock.  Assert so the problem is obvious if this is re-enabled.
     */
    pj_assert(0);

    for (key = ioqueue->key_list.next; key != &ioqueue->key_list;
	 key = key->next)
    {
	/* A key with pending read (or accept) must be in rfdset. */
	if (!pj_list_empty(&key->read_list)
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
	    || !pj_list_empty(&key->accept_list)
#endif
	    ) 
	{
	    pj_assert(PJ_FD_ISSET(key->fd, rfdset));
	} 
	else {
	    pj_assert(PJ_FD_ISSET(key->fd, rfdset) == 0);
	}

	/* A key with pending write (or connect) must be in wfdset. */
	if (!pj_list_empty(&key->write_list)
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
	    || key->connecting
#endif
	   )
	{
	    pj_assert(PJ_FD_ISSET(key->fd, wfdset));
	}
	else {
	    pj_assert(PJ_FD_ISSET(key->fd, wfdset) == 0);
	}
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
	/* A connecting key must be in xfdset (connect failure signal). */
	if (key->connecting)
	{
	    pj_assert(PJ_FD_ISSET(key->fd, xfdset));
	}
	else {
	    pj_assert(PJ_FD_ISSET(key->fd, xfdset) == 0);
	}
#endif /* PJ_HAS_TCP */
    }
}
#endif	/* VALIDATE_FD_SET */
331
Benny Prijonobc986152005-11-06 16:50:38 +0000332
333/* ioqueue_remove_from_set()
334 * This function is called from ioqueue_dispatch_event() to instruct
335 * the ioqueue to remove the specified descriptor from ioqueue's descriptor
336 * set for the specified event.
337 */
338static void ioqueue_remove_from_set( pj_ioqueue_t *ioqueue,
339 pj_sock_t fd,
340 enum ioqueue_event_type event_type)
341{
342 pj_lock_acquire(ioqueue->lock);
343
344 if (event_type == READABLE_EVENT)
345 PJ_FD_CLR((pj_sock_t)fd, &ioqueue->rfdset);
346 else if (event_type == WRITEABLE_EVENT)
347 PJ_FD_CLR((pj_sock_t)fd, &ioqueue->wfdset);
348 else if (event_type == EXCEPTION_EVENT)
349 PJ_FD_CLR((pj_sock_t)fd, &ioqueue->xfdset);
350 else
351 pj_assert(0);
352
353 pj_lock_release(ioqueue->lock);
354}
355
356/*
357 * ioqueue_add_to_set()
358 * This function is called from pj_ioqueue_recv(), pj_ioqueue_send() etc
359 * to instruct the ioqueue to add the specified handle to ioqueue's descriptor
360 * set for the specified event.
361 */
362static void ioqueue_add_to_set( pj_ioqueue_t *ioqueue,
363 pj_sock_t fd,
364 enum ioqueue_event_type event_type )
365{
366 pj_lock_acquire(ioqueue->lock);
367
368 if (event_type == READABLE_EVENT)
369 PJ_FD_SET((pj_sock_t)fd, &ioqueue->rfdset);
370 else if (event_type == WRITEABLE_EVENT)
371 PJ_FD_SET((pj_sock_t)fd, &ioqueue->wfdset);
372 else if (event_type == EXCEPTION_EVENT)
373 PJ_FD_SET((pj_sock_t)fd, &ioqueue->xfdset);
374 else
375 pj_assert(0);
376
377 pj_lock_release(ioqueue->lock);
378}
Benny Prijonodd859a62005-11-01 16:42:51 +0000379
380/*
381 * pj_ioqueue_poll()
382 *
383 * Few things worth written:
384 *
385 * - we used to do only one callback called per poll, but it didn't go
386 * very well. The reason is because on some situation, the write
387 * callback gets called all the time, thus doesn't give the read
388 * callback to get called. This happens, for example, when user
389 * submit write operation inside the write callback.
390 * As the result, we changed the behaviour so that now multiple
391 * callbacks are called in a single poll. It should be fast too,
392 * just that we need to be carefull with the ioqueue data structs.
393 *
394 * - to guarantee preemptiveness etc, the poll function must strictly
395 * work on fd_set copy of the ioqueue (not the original one).
396 */
Benny Prijonoa9946d52005-11-06 09:37:47 +0000397PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
Benny Prijonodd859a62005-11-01 16:42:51 +0000398{
399 pj_fd_set_t rfdset, wfdset, xfdset;
Benny Prijono4d974f32005-11-06 13:32:11 +0000400 int count, counter;
Benny Prijonodd859a62005-11-01 16:42:51 +0000401 pj_ioqueue_key_t *h;
Benny Prijonobc986152005-11-06 16:50:38 +0000402 struct event
403 {
404 pj_ioqueue_key_t *key;
405 enum ioqueue_event_type event_type;
406 } event[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
407
408 PJ_ASSERT_RETURN(ioqueue, PJ_EINVAL);
Benny Prijonodd859a62005-11-01 16:42:51 +0000409
410 /* Lock ioqueue before making fd_set copies */
Benny Prijonoa9946d52005-11-06 09:37:47 +0000411 pj_lock_acquire(ioqueue->lock);
Benny Prijonobc986152005-11-06 16:50:38 +0000412
413 /* We will only do select() when there are sockets to be polled.
414 * Otherwise select() will return error.
Benny Prijonoa9946d52005-11-06 09:37:47 +0000415 */
416 if (PJ_FD_COUNT(&ioqueue->rfdset)==0 &&
417 PJ_FD_COUNT(&ioqueue->wfdset)==0 &&
418 PJ_FD_COUNT(&ioqueue->xfdset)==0)
Benny Prijonodd859a62005-11-01 16:42:51 +0000419 {
Benny Prijonoa9946d52005-11-06 09:37:47 +0000420 pj_lock_release(ioqueue->lock);
Benny Prijonodd859a62005-11-01 16:42:51 +0000421 if (timeout)
422 pj_thread_sleep(PJ_TIME_VAL_MSEC(*timeout));
423 return 0;
424 }
425
426 /* Copy ioqueue's pj_fd_set_t to local variables. */
Benny Prijonoa9946d52005-11-06 09:37:47 +0000427 pj_memcpy(&rfdset, &ioqueue->rfdset, sizeof(pj_fd_set_t));
428 pj_memcpy(&wfdset, &ioqueue->wfdset, sizeof(pj_fd_set_t));
Benny Prijonodd859a62005-11-01 16:42:51 +0000429#if PJ_HAS_TCP
Benny Prijonoa9946d52005-11-06 09:37:47 +0000430 pj_memcpy(&xfdset, &ioqueue->xfdset, sizeof(pj_fd_set_t));
Benny Prijonodd859a62005-11-01 16:42:51 +0000431#else
432 PJ_FD_ZERO(&xfdset);
433#endif
434
435#if VALIDATE_FD_SET
Benny Prijonoa9946d52005-11-06 09:37:47 +0000436 validate_sets(ioqueue, &rfdset, &wfdset, &xfdset);
Benny Prijonodd859a62005-11-01 16:42:51 +0000437#endif
438
439 /* Unlock ioqueue before select(). */
Benny Prijonoa9946d52005-11-06 09:37:47 +0000440 pj_lock_release(ioqueue->lock);
Benny Prijonodd859a62005-11-01 16:42:51 +0000441
442 count = pj_sock_select(FD_SETSIZE, &rfdset, &wfdset, &xfdset, timeout);
443
444 if (count <= 0)
445 return count;
Benny Prijonobc986152005-11-06 16:50:38 +0000446 else if (count > PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL)
447 count = PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL;
Benny Prijonodd859a62005-11-01 16:42:51 +0000448
Benny Prijonobc986152005-11-06 16:50:38 +0000449 /* Scan descriptor sets for event and add the events in the event
450 * array to be processed later in this function. We do this so that
451 * events can be processed in parallel without holding ioqueue lock.
Benny Prijonoa9946d52005-11-06 09:37:47 +0000452 */
453 pj_lock_acquire(ioqueue->lock);
Benny Prijonodd859a62005-11-01 16:42:51 +0000454
Benny Prijonobc986152005-11-06 16:50:38 +0000455 counter = 0;
456
457 /* Scan for writable sockets first to handle piggy-back data
458 * coming with accept().
459 */
460 h = ioqueue->key_list.next;
461 for ( ; h!=&ioqueue->key_list && counter<count; h = h->next) {
462 if ( (key_has_pending_write(h) || key_has_pending_connect(h))
463 && PJ_FD_ISSET(h->fd, &wfdset))
464 {
465 event[counter].key = h;
466 event[counter].event_type = WRITEABLE_EVENT;
467 ++counter;
468 }
469
470 /* Scan for readable socket. */
471 if ((key_has_pending_read(h) || key_has_pending_accept(h))
472 && PJ_FD_ISSET(h->fd, &rfdset))
473 {
474 event[counter].key = h;
475 event[counter].event_type = READABLE_EVENT;
476 ++counter;
477 }
478
479#if PJ_HAS_TCP
480 if (key_has_pending_connect(h) && PJ_FD_ISSET(h->fd, &xfdset)) {
481 event[counter].key = h;
482 event[counter].event_type = EXCEPTION_EVENT;
483 ++counter;
484 }
485#endif
486 }
487
488 pj_lock_release(ioqueue->lock);
489
490 count = counter;
491
492 /* Now process all events. The dispatch functions will take care
493 * of locking in each of the key
494 */
495 for (counter=0; counter<count; ++counter) {
496 switch (event[counter].event_type) {
497 case READABLE_EVENT:
498 ioqueue_dispatch_read_event(ioqueue, event[counter].key);
499 break;
500 case WRITEABLE_EVENT:
501 ioqueue_dispatch_write_event(ioqueue, event[counter].key);
502 break;
503 case EXCEPTION_EVENT:
504 ioqueue_dispatch_exception_event(ioqueue, event[counter].key);
505 break;
506 case NO_EVENT:
507 pj_assert(!"Invalid event!");
508 break;
509 }
510 }
Benny Prijonodd859a62005-11-01 16:42:51 +0000511
Benny Prijonodd859a62005-11-01 16:42:51 +0000512 return count;
513}
514