Showing 3 changed files with 11 additions and 994 deletions
@@ -55,21 +55,6 @@ DEFINES = -D$(OS) -DDEBUG -DMD_HAVE_EPOLL -DMALLOC_STACK
55 | 55 | ||
56 | ########################## | 56 | ########################## |
57 | # Other possible defines: | 57 | # Other possible defines: |
58 | -# To use poll(2) instead of select(2) for events checking: | ||
59 | -# DEFINES += -DUSE_POLL | ||
60 | -# You may prefer to use select for applications that have many threads | ||
61 | -# using one file descriptor, and poll for applications that have many | ||
62 | -# different file descriptors. With USE_POLL poll() is called with at | ||
63 | -# least one pollfd per I/O-blocked thread, so 1000 threads sharing one | ||
64 | -# descriptor will poll 1000 identical pollfds and select would be more | ||
65 | -# efficient. But if the threads all use different descriptors poll() | ||
66 | -# may be better depending on your operating system's implementation of | ||
67 | -# poll and select. Really, it's up to you. Oh, and on some platforms | ||
68 | -# poll() fails with more than a few dozen descriptors. | ||
69 | -# | ||
70 | -# Some platforms allow to define FD_SETSIZE (if select() is used), e.g.: | ||
71 | -# DEFINES += -DFD_SETSIZE=4096 | ||
72 | -# | ||
73 | # To use malloc(3) instead of mmap(2) for stack allocation: | 58 | # To use malloc(3) instead of mmap(2) for stack allocation: |
74 | # DEFINES += -DMALLOC_STACK | 59 | # DEFINES += -DMALLOC_STACK |
75 | # | 60 | # |
@@ -77,25 +62,9 @@ DEFINES = -D$(OS) -DDEBUG -DMD_HAVE_EPOLL -DMALLOC_STACK
77 | # (but not too many!): | 62 | # (but not too many!): |
78 | # DEFINES += -DST_KEYS_MAX=<n> | 63 | # DEFINES += -DST_KEYS_MAX=<n> |
79 | # | 64 | # |
80 | -# To start with more than the default 64 initial pollfd slots | ||
81 | -# (but the table grows dynamically anyway): | ||
82 | -# DEFINES += -DST_MIN_POLLFDS_SIZE=<n> | ||
83 | -# | ||
84 | # Note that you can also add these defines by specifying them as | 65 | # Note that you can also add these defines by specifying them as |
85 | # make/gmake arguments (without editing this Makefile). For example: | 66 | # make/gmake arguments (without editing this Makefile). For example: |
86 | # | 67 | # |
87 | -# make EXTRA_CFLAGS=-DUSE_POLL <target> | ||
88 | -# | ||
89 | -# (replace make with gmake if needed). | ||
90 | -# | ||
91 | -# You can also modify the default selection of an alternative event | ||
92 | -# notification mechanism. E.g., to enable kqueue(2) support (if it's not | ||
93 | -# enabled by default): | ||
94 | -# | ||
95 | -# gmake EXTRA_CFLAGS=-DMD_HAVE_KQUEUE <target> | ||
96 | -# | ||
97 | -# or to disable default epoll(4) support: | ||
98 | -# | ||
99 | # make EXTRA_CFLAGS=-UMD_HAVE_EPOLL <target> | 68 | # make EXTRA_CFLAGS=-UMD_HAVE_EPOLL <target> |
100 | # | 69 | # |
101 | ########################## | 70 | ########################## |
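
Note: the defines documented above are ordinary preprocessor switches once the build reaches the C sources. Below is a minimal sketch (illustrative only, not the library's real allocator; the helper name example_alloc_stack is made up) of how a flag such as -DMALLOC_STACK, passed via DEFINES or EXTRA_CFLAGS, typically selects the stack-allocation path:

#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Sketch: pick malloc(3) or mmap(2) for a thread stack depending on
 * whether MALLOC_STACK was passed in DEFINES / EXTRA_CFLAGS. */
void *example_alloc_stack(size_t size)
{
#ifdef MALLOC_STACK
    return malloc(size);                      /* heap-backed stack */
#else
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return (p == MAP_FAILED) ? NULL : p;      /* mmap-backed stack */
#endif
}
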
@@ -40,80 +40,21 @@
40 | #include <errno.h> | 40 | #include <errno.h> |
41 | #include "common.h" | 41 | #include "common.h" |
42 | 42 | ||
43 | -#ifdef MD_HAVE_KQUEUE | ||
44 | -#include <sys/event.h> | ||
45 | -#endif | ||
46 | -#ifdef MD_HAVE_EPOLL | ||
47 | -#include <sys/epoll.h> | 43 | +#ifdef USE_POLL |
44 | + #error "Not support USE_POLL" | ||
48 | #endif | 45 | #endif |
49 | - | ||
50 | -#if defined(USE_POLL) && !defined(MD_HAVE_POLL) | ||
51 | -/* Force poll usage if explicitly asked for it */ | ||
52 | -#define MD_HAVE_POLL | 46 | +#ifdef MD_HAVE_KQUEUE |
47 | + #error "Not support MD_HAVE_KQUEUE" | ||
53 | #endif | 48 | #endif |
54 | - | ||
55 | - | ||
56 | -static struct _st_seldata { | ||
57 | - fd_set fd_read_set, fd_write_set, fd_exception_set; | ||
58 | - int fd_ref_cnts[FD_SETSIZE][3]; | ||
59 | - int maxfd; | ||
60 | -} *_st_select_data; | ||
61 | - | ||
62 | -#define _ST_SELECT_MAX_OSFD (_st_select_data->maxfd) | ||
63 | -#define _ST_SELECT_READ_SET (_st_select_data->fd_read_set) | ||
64 | -#define _ST_SELECT_WRITE_SET (_st_select_data->fd_write_set) | ||
65 | -#define _ST_SELECT_EXCEP_SET (_st_select_data->fd_exception_set) | ||
66 | -#define _ST_SELECT_READ_CNT(fd) (_st_select_data->fd_ref_cnts[fd][0]) | ||
67 | -#define _ST_SELECT_WRITE_CNT(fd) (_st_select_data->fd_ref_cnts[fd][1]) | ||
68 | -#define _ST_SELECT_EXCEP_CNT(fd) (_st_select_data->fd_ref_cnts[fd][2]) | ||
69 | - | ||
70 | - | ||
71 | #ifdef MD_HAVE_POLL | 49 | #ifdef MD_HAVE_POLL |
72 | -static struct _st_polldata { | ||
73 | - struct pollfd *pollfds; | ||
74 | - int pollfds_size; | ||
75 | - int fdcnt; | ||
76 | -} *_st_poll_data; | ||
77 | - | ||
78 | -#define _ST_POLL_OSFD_CNT (_st_poll_data->fdcnt) | ||
79 | -#define _ST_POLLFDS (_st_poll_data->pollfds) | ||
80 | -#define _ST_POLLFDS_SIZE (_st_poll_data->pollfds_size) | ||
81 | -#endif /* MD_HAVE_POLL */ | ||
82 | - | ||
83 | - | ||
84 | -#ifdef MD_HAVE_KQUEUE | ||
85 | -typedef struct _kq_fd_data { | ||
86 | - int rd_ref_cnt; | ||
87 | - int wr_ref_cnt; | ||
88 | - int revents; | ||
89 | -} _kq_fd_data_t; | ||
90 | - | ||
91 | -static struct _st_kqdata { | ||
92 | - _kq_fd_data_t *fd_data; | ||
93 | - struct kevent *evtlist; | ||
94 | - struct kevent *addlist; | ||
95 | - struct kevent *dellist; | ||
96 | - int fd_data_size; | ||
97 | - int evtlist_size; | ||
98 | - int addlist_size; | ||
99 | - int addlist_cnt; | ||
100 | - int dellist_size; | ||
101 | - int dellist_cnt; | ||
102 | - int kq; | ||
103 | - pid_t pid; | ||
104 | -} *_st_kq_data; | ||
105 | - | ||
106 | -#ifndef ST_KQ_MIN_EVTLIST_SIZE | ||
107 | -#define ST_KQ_MIN_EVTLIST_SIZE 64 | 50 | + #error "Not support MD_HAVE_POLL" |
51 | +#endif | ||
52 | +#ifndef MD_HAVE_EPOLL | ||
53 | + #error "Only support MD_HAVE_EPOLL" | ||
108 | #endif | 54 | #endif |
109 | 55 | ||
110 | -#define _ST_KQ_READ_CNT(fd) (_st_kq_data->fd_data[fd].rd_ref_cnt) | ||
111 | -#define _ST_KQ_WRITE_CNT(fd) (_st_kq_data->fd_data[fd].wr_ref_cnt) | ||
112 | -#define _ST_KQ_REVENTS(fd) (_st_kq_data->fd_data[fd].revents) | ||
113 | -#endif /* MD_HAVE_KQUEUE */ | ||
114 | - | 56 | +#include <sys/epoll.h> |
115 | 57 | ||
116 | -#ifdef MD_HAVE_EPOLL | ||
117 | typedef struct _epoll_fd_data { | 58 | typedef struct _epoll_fd_data { |
118 | int rd_ref_cnt; | 59 | int rd_ref_cnt; |
119 | int wr_ref_cnt; | 60 | int wr_ref_cnt; |
@@ -148,878 +89,9 @@ static struct _st_epolldata {
148 | #define _ST_EPOLL_EVENTS(fd) \ | 89 | #define _ST_EPOLL_EVENTS(fd) \ |
149 | (_ST_EPOLL_READ_BIT(fd)|_ST_EPOLL_WRITE_BIT(fd)|_ST_EPOLL_EXCEP_BIT(fd)) | 90 | (_ST_EPOLL_READ_BIT(fd)|_ST_EPOLL_WRITE_BIT(fd)|_ST_EPOLL_EXCEP_BIT(fd)) |
150 | 91 | ||
151 | -#endif /* MD_HAVE_EPOLL */ | ||
152 | - | ||
153 | _st_eventsys_t *_st_eventsys = NULL; | 92 | _st_eventsys_t *_st_eventsys = NULL; |
154 | 93 | ||
155 | /***************************************** | 94 | /***************************************** |
156 | - * select event system | ||
157 | - */ | ||
158 | - | ||
159 | -ST_HIDDEN int _st_select_init(void) | ||
160 | -{ | ||
161 | - _st_select_data = (struct _st_seldata *) malloc(sizeof(*_st_select_data)); | ||
162 | - if (!_st_select_data) { | ||
163 | - return -1; | ||
164 | - } | ||
165 | - | ||
166 | - memset(_st_select_data, 0, sizeof(*_st_select_data)); | ||
167 | - _st_select_data->maxfd = -1; | ||
168 | - | ||
169 | - return 0; | ||
170 | -} | ||
171 | - | ||
172 | -ST_HIDDEN int _st_select_pollset_add(struct pollfd *pds, int npds) | ||
173 | -{ | ||
174 | - struct pollfd *pd; | ||
175 | - struct pollfd *epd = pds + npds; | ||
176 | - | ||
177 | - /* Do checks up front */ | ||
178 | - for (pd = pds; pd < epd; pd++) { | ||
179 | - if (pd->fd < 0 || pd->fd >= FD_SETSIZE || !pd->events || (pd->events & ~(POLLIN | POLLOUT | POLLPRI))) { | ||
180 | - errno = EINVAL; | ||
181 | - return -1; | ||
182 | - } | ||
183 | - } | ||
184 | - | ||
185 | - for (pd = pds; pd < epd; pd++) { | ||
186 | - if (pd->events & POLLIN) { | ||
187 | - FD_SET(pd->fd, &_ST_SELECT_READ_SET); | ||
188 | - _ST_SELECT_READ_CNT(pd->fd)++; | ||
189 | - } | ||
190 | - if (pd->events & POLLOUT) { | ||
191 | - FD_SET(pd->fd, &_ST_SELECT_WRITE_SET); | ||
192 | - _ST_SELECT_WRITE_CNT(pd->fd)++; | ||
193 | - } | ||
194 | - if (pd->events & POLLPRI) { | ||
195 | - FD_SET(pd->fd, &_ST_SELECT_EXCEP_SET); | ||
196 | - _ST_SELECT_EXCEP_CNT(pd->fd)++; | ||
197 | - } | ||
198 | - if (_ST_SELECT_MAX_OSFD < pd->fd) | ||
199 | - _ST_SELECT_MAX_OSFD = pd->fd; | ||
200 | - } | ||
201 | - | ||
202 | - return 0; | ||
203 | -} | ||
204 | - | ||
205 | -ST_HIDDEN void _st_select_pollset_del(struct pollfd *pds, int npds) | ||
206 | -{ | ||
207 | - struct pollfd *pd; | ||
208 | - struct pollfd *epd = pds + npds; | ||
209 | - | ||
210 | - for (pd = pds; pd < epd; pd++) { | ||
211 | - if (pd->events & POLLIN) { | ||
212 | - if (--_ST_SELECT_READ_CNT(pd->fd) == 0) { | ||
213 | - FD_CLR(pd->fd, &_ST_SELECT_READ_SET); | ||
214 | - } | ||
215 | - } | ||
216 | - if (pd->events & POLLOUT) { | ||
217 | - if (--_ST_SELECT_WRITE_CNT(pd->fd) == 0) { | ||
218 | - FD_CLR(pd->fd, &_ST_SELECT_WRITE_SET); | ||
219 | - } | ||
220 | - } | ||
221 | - if (pd->events & POLLPRI) { | ||
222 | - if (--_ST_SELECT_EXCEP_CNT(pd->fd) == 0) { | ||
223 | - FD_CLR(pd->fd, &_ST_SELECT_EXCEP_SET); | ||
224 | - } | ||
225 | - } | ||
226 | - } | ||
227 | -} | ||
228 | - | ||
229 | -ST_HIDDEN void _st_select_find_bad_fd(void) | ||
230 | -{ | ||
231 | - _st_clist_t *q; | ||
232 | - _st_pollq_t *pq; | ||
233 | - int notify; | ||
234 | - struct pollfd *pds, *epds; | ||
235 | - int pq_max_osfd, osfd; | ||
236 | - short events; | ||
237 | - | ||
238 | - _ST_SELECT_MAX_OSFD = -1; | ||
239 | - | ||
240 | - for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) { | ||
241 | - pq = _ST_POLLQUEUE_PTR(q); | ||
242 | - notify = 0; | ||
243 | - epds = pq->pds + pq->npds; | ||
244 | - pq_max_osfd = -1; | ||
245 | - | ||
246 | - for (pds = pq->pds; pds < epds; pds++) { | ||
247 | - osfd = pds->fd; | ||
248 | - pds->revents = 0; | ||
249 | - if (pds->events == 0) { | ||
250 | - continue; | ||
251 | - } | ||
252 | - if (fcntl(osfd, F_GETFL, 0) < 0) { | ||
253 | - pds->revents = POLLNVAL; | ||
254 | - notify = 1; | ||
255 | - } | ||
256 | - if (osfd > pq_max_osfd) { | ||
257 | - pq_max_osfd = osfd; | ||
258 | - } | ||
259 | - } | ||
260 | - | ||
261 | - if (notify) { | ||
262 | - ST_REMOVE_LINK(&pq->links); | ||
263 | - pq->on_ioq = 0; | ||
264 | - /* | ||
265 | - * Decrement the count of descriptors for each descriptor/event | ||
266 | - * because this I/O request is being removed from the ioq | ||
267 | - */ | ||
268 | - for (pds = pq->pds; pds < epds; pds++) { | ||
269 | - osfd = pds->fd; | ||
270 | - events = pds->events; | ||
271 | - if (events & POLLIN) { | ||
272 | - if (--_ST_SELECT_READ_CNT(osfd) == 0) { | ||
273 | - FD_CLR(osfd, &_ST_SELECT_READ_SET); | ||
274 | - } | ||
275 | - } | ||
276 | - if (events & POLLOUT) { | ||
277 | - if (--_ST_SELECT_WRITE_CNT(osfd) == 0) { | ||
278 | - FD_CLR(osfd, &_ST_SELECT_WRITE_SET); | ||
279 | - } | ||
280 | - } | ||
281 | - if (events & POLLPRI) { | ||
282 | - if (--_ST_SELECT_EXCEP_CNT(osfd) == 0) { | ||
283 | - FD_CLR(osfd, &_ST_SELECT_EXCEP_SET); | ||
284 | - } | ||
285 | - } | ||
286 | - } | ||
287 | - | ||
288 | - if (pq->thread->flags & _ST_FL_ON_SLEEPQ) { | ||
289 | - _ST_DEL_SLEEPQ(pq->thread); | ||
290 | - } | ||
291 | - pq->thread->state = _ST_ST_RUNNABLE; | ||
292 | - _ST_ADD_RUNQ(pq->thread); | ||
293 | - } else { | ||
294 | - if (_ST_SELECT_MAX_OSFD < pq_max_osfd) { | ||
295 | - _ST_SELECT_MAX_OSFD = pq_max_osfd; | ||
296 | - } | ||
297 | - } | ||
298 | - } | ||
299 | -} | ||
300 | - | ||
301 | -ST_HIDDEN void _st_select_dispatch(void) | ||
302 | -{ | ||
303 | - struct timeval timeout, *tvp; | ||
304 | - fd_set r, w, e; | ||
305 | - fd_set *rp, *wp, *ep; | ||
306 | - int nfd, pq_max_osfd, osfd; | ||
307 | - _st_clist_t *q; | ||
308 | - st_utime_t min_timeout; | ||
309 | - _st_pollq_t *pq; | ||
310 | - int notify; | ||
311 | - struct pollfd *pds, *epds; | ||
312 | - short events, revents; | ||
313 | - | ||
314 | - /* | ||
315 | - * Assignment of fd_sets | ||
316 | - */ | ||
317 | - r = _ST_SELECT_READ_SET; | ||
318 | - w = _ST_SELECT_WRITE_SET; | ||
319 | - e = _ST_SELECT_EXCEP_SET; | ||
320 | - | ||
321 | - rp = &r; | ||
322 | - wp = &w; | ||
323 | - ep = &e; | ||
324 | - | ||
325 | - if (_ST_SLEEPQ == NULL) { | ||
326 | - tvp = NULL; | ||
327 | - } else { | ||
328 | - min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 : (_ST_SLEEPQ->due - _ST_LAST_CLOCK); | ||
329 | - timeout.tv_sec = (int) (min_timeout / 1000000); | ||
330 | - timeout.tv_usec = (int) (min_timeout % 1000000); | ||
331 | - tvp = &timeout; | ||
332 | - } | ||
333 | - | ||
334 | - /* Check for I/O operations */ | ||
335 | - nfd = select(_ST_SELECT_MAX_OSFD + 1, rp, wp, ep, tvp); | ||
336 | - | ||
337 | - /* Notify threads that are associated with the selected descriptors */ | ||
338 | - if (nfd > 0) { | ||
339 | - _ST_SELECT_MAX_OSFD = -1; | ||
340 | - for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) { | ||
341 | - pq = _ST_POLLQUEUE_PTR(q); | ||
342 | - notify = 0; | ||
343 | - epds = pq->pds + pq->npds; | ||
344 | - pq_max_osfd = -1; | ||
345 | - | ||
346 | - for (pds = pq->pds; pds < epds; pds++) { | ||
347 | - osfd = pds->fd; | ||
348 | - events = pds->events; | ||
349 | - revents = 0; | ||
350 | - if ((events & POLLIN) && FD_ISSET(osfd, rp)) { | ||
351 | - revents |= POLLIN; | ||
352 | - } | ||
353 | - if ((events & POLLOUT) && FD_ISSET(osfd, wp)) { | ||
354 | - revents |= POLLOUT; | ||
355 | - } | ||
356 | - if ((events & POLLPRI) && FD_ISSET(osfd, ep)) { | ||
357 | - revents |= POLLPRI; | ||
358 | - } | ||
359 | - pds->revents = revents; | ||
360 | - if (revents) { | ||
361 | - notify = 1; | ||
362 | - } | ||
363 | - if (osfd > pq_max_osfd) { | ||
364 | - pq_max_osfd = osfd; | ||
365 | - } | ||
366 | - } | ||
367 | - if (notify) { | ||
368 | - ST_REMOVE_LINK(&pq->links); | ||
369 | - pq->on_ioq = 0; | ||
370 | - /* | ||
371 | - * Decrement the count of descriptors for each descriptor/event | ||
372 | - * because this I/O request is being removed from the ioq | ||
373 | - */ | ||
374 | - for (pds = pq->pds; pds < epds; pds++) { | ||
375 | - osfd = pds->fd; | ||
376 | - events = pds->events; | ||
377 | - if (events & POLLIN) { | ||
378 | - if (--_ST_SELECT_READ_CNT(osfd) == 0) { | ||
379 | - FD_CLR(osfd, &_ST_SELECT_READ_SET); | ||
380 | - } | ||
381 | - } | ||
382 | - if (events & POLLOUT) { | ||
383 | - if (--_ST_SELECT_WRITE_CNT(osfd) == 0) { | ||
384 | - FD_CLR(osfd, &_ST_SELECT_WRITE_SET); | ||
385 | - } | ||
386 | - } | ||
387 | - if (events & POLLPRI) { | ||
388 | - if (--_ST_SELECT_EXCEP_CNT(osfd) == 0) { | ||
389 | - FD_CLR(osfd, &_ST_SELECT_EXCEP_SET); | ||
390 | - } | ||
391 | - } | ||
392 | - } | ||
393 | - | ||
394 | - if (pq->thread->flags & _ST_FL_ON_SLEEPQ) { | ||
395 | - _ST_DEL_SLEEPQ(pq->thread); | ||
396 | - } | ||
397 | - pq->thread->state = _ST_ST_RUNNABLE; | ||
398 | - _ST_ADD_RUNQ(pq->thread); | ||
399 | - } else { | ||
400 | - if (_ST_SELECT_MAX_OSFD < pq_max_osfd) { | ||
401 | - _ST_SELECT_MAX_OSFD = pq_max_osfd; | ||
402 | - } | ||
403 | - } | ||
404 | - } | ||
405 | - } else if (nfd < 0) { | ||
406 | - /* | ||
407 | - * It can happen when a thread closes file descriptor | ||
408 | - * that is being used by some other thread -- BAD! | ||
409 | - */ | ||
410 | - if (errno == EBADF) { | ||
411 | - _st_select_find_bad_fd(); | ||
412 | - } | ||
413 | - } | ||
414 | -} | ||
415 | - | ||
416 | -ST_HIDDEN int _st_select_fd_new(int osfd) | ||
417 | -{ | ||
418 | - if (osfd >= FD_SETSIZE) { | ||
419 | - errno = EMFILE; | ||
420 | - return -1; | ||
421 | - } | ||
422 | - | ||
423 | - return 0; | ||
424 | -} | ||
425 | - | ||
426 | -ST_HIDDEN int _st_select_fd_close(int osfd) | ||
427 | -{ | ||
428 | - if (_ST_SELECT_READ_CNT(osfd) || _ST_SELECT_WRITE_CNT(osfd) || _ST_SELECT_EXCEP_CNT(osfd)) { | ||
429 | - errno = EBUSY; | ||
430 | - return -1; | ||
431 | - } | ||
432 | - | ||
433 | - return 0; | ||
434 | -} | ||
435 | - | ||
436 | -ST_HIDDEN int _st_select_fd_getlimit(void) | ||
437 | -{ | ||
438 | - return FD_SETSIZE; | ||
439 | -} | ||
440 | - | ||
441 | -static _st_eventsys_t _st_select_eventsys = { | ||
442 | - "select", | ||
443 | - ST_EVENTSYS_SELECT, | ||
444 | - _st_select_init, | ||
445 | - _st_select_dispatch, | ||
446 | - _st_select_pollset_add, | ||
447 | - _st_select_pollset_del, | ||
448 | - _st_select_fd_new, | ||
449 | - _st_select_fd_close, | ||
450 | - _st_select_fd_getlimit | ||
451 | -}; | ||
452 | - | ||
453 | -#ifdef MD_HAVE_POLL | ||
454 | -/***************************************** | ||
455 | - * poll event system | ||
456 | - */ | ||
457 | - | ||
458 | -ST_HIDDEN int _st_poll_init(void) | ||
459 | -{ | ||
460 | - _st_poll_data = (struct _st_polldata *) malloc(sizeof(*_st_poll_data)); | ||
461 | - if (!_st_poll_data) { | ||
462 | - return -1; | ||
463 | - } | ||
464 | - | ||
465 | - _ST_POLLFDS = (struct pollfd *) malloc(ST_MIN_POLLFDS_SIZE * sizeof(struct pollfd)); | ||
466 | - if (!_ST_POLLFDS) { | ||
467 | - free(_st_poll_data); | ||
468 | - _st_poll_data = NULL; | ||
469 | - return -1; | ||
470 | - } | ||
471 | - _ST_POLLFDS_SIZE = ST_MIN_POLLFDS_SIZE; | ||
472 | - _ST_POLL_OSFD_CNT = 0; | ||
473 | - | ||
474 | - return 0; | ||
475 | -} | ||
476 | - | ||
477 | -ST_HIDDEN int _st_poll_pollset_add(struct pollfd *pds, int npds) | ||
478 | -{ | ||
479 | - struct pollfd *pd; | ||
480 | - struct pollfd *epd = pds + npds; | ||
481 | - | ||
482 | - for (pd = pds; pd < epd; pd++) { | ||
483 | - if (pd->fd < 0 || !pd->events) { | ||
484 | - errno = EINVAL; | ||
485 | - return -1; | ||
486 | - } | ||
487 | - } | ||
488 | - | ||
489 | - _ST_POLL_OSFD_CNT += npds; | ||
490 | - | ||
491 | - return 0; | ||
492 | -} | ||
493 | - | ||
494 | -/* ARGSUSED */ | ||
495 | -ST_HIDDEN void _st_poll_pollset_del(struct pollfd *pds, int npds) | ||
496 | -{ | ||
497 | - _ST_POLL_OSFD_CNT -= npds; | ||
498 | - ST_ASSERT(_ST_POLL_OSFD_CNT >= 0); | ||
499 | -} | ||
500 | - | ||
501 | -ST_HIDDEN void _st_poll_dispatch(void) | ||
502 | -{ | ||
503 | - int timeout, nfd; | ||
504 | - _st_clist_t *q; | ||
505 | - st_utime_t min_timeout; | ||
506 | - _st_pollq_t *pq; | ||
507 | - struct pollfd *pds, *epds, *pollfds; | ||
508 | - | ||
509 | - /* | ||
510 | - * Build up the array of struct pollfd to wait on. | ||
511 | - * If existing array is not big enough, release it and allocate a new one. | ||
512 | - */ | ||
513 | - ST_ASSERT(_ST_POLL_OSFD_CNT >= 0); | ||
514 | - if (_ST_POLL_OSFD_CNT > _ST_POLLFDS_SIZE) { | ||
515 | - free(_ST_POLLFDS); | ||
516 | - _ST_POLLFDS = (struct pollfd *) malloc((_ST_POLL_OSFD_CNT + 10) * sizeof(struct pollfd)); | ||
517 | - ST_ASSERT(_ST_POLLFDS != NULL); | ||
518 | - _ST_POLLFDS_SIZE = _ST_POLL_OSFD_CNT + 10; | ||
519 | - } | ||
520 | - pollfds = _ST_POLLFDS; | ||
521 | - | ||
522 | - /* Gather all descriptors into one array */ | ||
523 | - for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) { | ||
524 | - pq = _ST_POLLQUEUE_PTR(q); | ||
525 | - memcpy(pollfds, pq->pds, sizeof(struct pollfd) * pq->npds); | ||
526 | - pollfds += pq->npds; | ||
527 | - } | ||
528 | - ST_ASSERT(pollfds <= _ST_POLLFDS + _ST_POLLFDS_SIZE); | ||
529 | - | ||
530 | - if (_ST_SLEEPQ == NULL) { | ||
531 | - timeout = -1; | ||
532 | - } else { | ||
533 | - min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 : (_ST_SLEEPQ->due - _ST_LAST_CLOCK); | ||
534 | - timeout = (int) (min_timeout / 1000); | ||
535 | - } | ||
536 | - | ||
537 | - /* Check for I/O operations */ | ||
538 | - nfd = poll(_ST_POLLFDS, _ST_POLL_OSFD_CNT, timeout); | ||
539 | - | ||
540 | - /* Notify threads that are associated with the selected descriptors */ | ||
541 | - if (nfd > 0) { | ||
542 | - pollfds = _ST_POLLFDS; | ||
543 | - for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) { | ||
544 | - pq = _ST_POLLQUEUE_PTR(q); | ||
545 | - epds = pollfds + pq->npds; | ||
546 | - for (pds = pollfds; pds < epds; pds++) { | ||
547 | - if (pds->revents) { | ||
548 | - break; | ||
549 | - } | ||
550 | - } | ||
551 | - if (pds < epds) { | ||
552 | - memcpy(pq->pds, pollfds, sizeof(struct pollfd) * pq->npds); | ||
553 | - ST_REMOVE_LINK(&pq->links); | ||
554 | - pq->on_ioq = 0; | ||
555 | - | ||
556 | - if (pq->thread->flags & _ST_FL_ON_SLEEPQ) { | ||
557 | - _ST_DEL_SLEEPQ(pq->thread); | ||
558 | - } | ||
559 | - pq->thread->state = _ST_ST_RUNNABLE; | ||
560 | - _ST_ADD_RUNQ(pq->thread); | ||
561 | - | ||
562 | - _ST_POLL_OSFD_CNT -= pq->npds; | ||
563 | - ST_ASSERT(_ST_POLL_OSFD_CNT >= 0); | ||
564 | - } | ||
565 | - pollfds = epds; | ||
566 | - } | ||
567 | - } | ||
568 | -} | ||
569 | - | ||
570 | -/* ARGSUSED */ | ||
571 | -ST_HIDDEN int _st_poll_fd_new(int osfd) | ||
572 | -{ | ||
573 | - return 0; | ||
574 | -} | ||
575 | - | ||
576 | -/* ARGSUSED */ | ||
577 | -ST_HIDDEN int _st_poll_fd_close(int osfd) | ||
578 | -{ | ||
579 | - /* | ||
580 | - * We don't maintain I/O counts for poll event system | ||
581 | - * so nothing to check here. | ||
582 | - */ | ||
583 | - return 0; | ||
584 | -} | ||
585 | - | ||
586 | -ST_HIDDEN int _st_poll_fd_getlimit(void) | ||
587 | -{ | ||
588 | - /* zero means no specific limit */ | ||
589 | - return 0; | ||
590 | -} | ||
591 | - | ||
592 | -static _st_eventsys_t _st_poll_eventsys = { | ||
593 | - "poll", | ||
594 | - ST_EVENTSYS_POLL, | ||
595 | - _st_poll_init, | ||
596 | - _st_poll_dispatch, | ||
597 | - _st_poll_pollset_add, | ||
598 | - _st_poll_pollset_del, | ||
599 | - _st_poll_fd_new, | ||
600 | - _st_poll_fd_close, | ||
601 | - _st_poll_fd_getlimit | ||
602 | -}; | ||
603 | -#endif /* MD_HAVE_POLL */ | ||
604 | - | ||
605 | - | ||
606 | -#ifdef MD_HAVE_KQUEUE | ||
607 | -/***************************************** | ||
608 | - * kqueue event system | ||
609 | - */ | ||
610 | - | ||
611 | -ST_HIDDEN int _st_kq_init(void) | ||
612 | -{ | ||
613 | - int err = 0; | ||
614 | - int rv = 0; | ||
615 | - | ||
616 | - _st_kq_data = (struct _st_kqdata *) calloc(1, sizeof(*_st_kq_data)); | ||
617 | - if (!_st_kq_data) { | ||
618 | - return -1; | ||
619 | - } | ||
620 | - | ||
621 | - if ((_st_kq_data->kq = kqueue()) < 0) { | ||
622 | - err = errno; | ||
623 | - rv = -1; | ||
624 | - goto cleanup_kq; | ||
625 | - } | ||
626 | - fcntl(_st_kq_data->kq, F_SETFD, FD_CLOEXEC); | ||
627 | - _st_kq_data->pid = getpid(); | ||
628 | - | ||
629 | - /* | ||
630 | - * Allocate file descriptor data array. | ||
631 | - * FD_SETSIZE looks like good initial size. | ||
632 | - */ | ||
633 | - _st_kq_data->fd_data_size = FD_SETSIZE; | ||
634 | - _st_kq_data->fd_data = (_kq_fd_data_t *)calloc(_st_kq_data->fd_data_size, sizeof(_kq_fd_data_t)); | ||
635 | - if (!_st_kq_data->fd_data) { | ||
636 | - err = errno; | ||
637 | - rv = -1; | ||
638 | - goto cleanup_kq; | ||
639 | - } | ||
640 | - | ||
641 | - /* Allocate event lists */ | ||
642 | - _st_kq_data->evtlist_size = ST_KQ_MIN_EVTLIST_SIZE; | ||
643 | - _st_kq_data->evtlist = (struct kevent *)malloc(_st_kq_data->evtlist_size * sizeof(struct kevent)); | ||
644 | - _st_kq_data->addlist_size = ST_KQ_MIN_EVTLIST_SIZE; | ||
645 | - _st_kq_data->addlist = (struct kevent *)malloc(_st_kq_data->addlist_size * sizeof(struct kevent)); | ||
646 | - _st_kq_data->dellist_size = ST_KQ_MIN_EVTLIST_SIZE; | ||
647 | - _st_kq_data->dellist = (struct kevent *)malloc(_st_kq_data->dellist_size * sizeof(struct kevent)); | ||
648 | - if (!_st_kq_data->evtlist || !_st_kq_data->addlist || | ||
649 | - !_st_kq_data->dellist) { | ||
650 | - err = ENOMEM; | ||
651 | - rv = -1; | ||
652 | - } | ||
653 | - | ||
654 | - cleanup_kq: | ||
655 | - if (rv < 0) { | ||
656 | - if (_st_kq_data->kq >= 0) { | ||
657 | - close(_st_kq_data->kq); | ||
658 | - } | ||
659 | - free(_st_kq_data->fd_data); | ||
660 | - free(_st_kq_data->evtlist); | ||
661 | - free(_st_kq_data->addlist); | ||
662 | - free(_st_kq_data->dellist); | ||
663 | - free(_st_kq_data); | ||
664 | - _st_kq_data = NULL; | ||
665 | - errno = err; | ||
666 | - } | ||
667 | - | ||
668 | - return rv; | ||
669 | -} | ||
670 | - | ||
671 | -ST_HIDDEN int _st_kq_fd_data_expand(int maxfd) | ||
672 | -{ | ||
673 | - _kq_fd_data_t *ptr; | ||
674 | - int n = _st_kq_data->fd_data_size; | ||
675 | - | ||
676 | - while (maxfd >= n) { | ||
677 | - n <<= 1; | ||
678 | - } | ||
679 | - | ||
680 | - ptr = (_kq_fd_data_t *)realloc(_st_kq_data->fd_data, n * sizeof(_kq_fd_data_t)); | ||
681 | - if (!ptr) { | ||
682 | - return -1; | ||
683 | - } | ||
684 | - | ||
685 | - memset(ptr + _st_kq_data->fd_data_size, 0, (n - _st_kq_data->fd_data_size) * sizeof(_kq_fd_data_t)); | ||
686 | - | ||
687 | - _st_kq_data->fd_data = ptr; | ||
688 | - _st_kq_data->fd_data_size = n; | ||
689 | - | ||
690 | - return 0; | ||
691 | -} | ||
692 | - | ||
693 | -ST_HIDDEN int _st_kq_addlist_expand(int avail) | ||
694 | -{ | ||
695 | - struct kevent *ptr; | ||
696 | - int n = _st_kq_data->addlist_size; | ||
697 | - | ||
698 | - while (avail > n - _st_kq_data->addlist_cnt) { | ||
699 | - n <<= 1; | ||
700 | - } | ||
701 | - | ||
702 | - ptr = (struct kevent *)realloc(_st_kq_data->addlist, n * sizeof(struct kevent)); | ||
703 | - if (!ptr) { | ||
704 | - return -1; | ||
705 | - } | ||
706 | - | ||
707 | - _st_kq_data->addlist = ptr; | ||
708 | - _st_kq_data->addlist_size = n; | ||
709 | - | ||
710 | - /* | ||
711 | - * Try to expand the result event list too | ||
712 | - * (although we don't have to do it). | ||
713 | - */ | ||
714 | - ptr = (struct kevent *)realloc(_st_kq_data->evtlist, n * sizeof(struct kevent)); | ||
715 | - if (ptr) { | ||
716 | - _st_kq_data->evtlist = ptr; | ||
717 | - _st_kq_data->evtlist_size = n; | ||
718 | - } | ||
719 | - | ||
720 | - return 0; | ||
721 | -} | ||
722 | - | ||
723 | -ST_HIDDEN void _st_kq_addlist_add(const struct kevent *kev) | ||
724 | -{ | ||
725 | - ST_ASSERT(_st_kq_data->addlist_cnt < _st_kq_data->addlist_size); | ||
726 | - memcpy(_st_kq_data->addlist + _st_kq_data->addlist_cnt, kev, sizeof(struct kevent)); | ||
727 | - _st_kq_data->addlist_cnt++; | ||
728 | -} | ||
729 | - | ||
730 | -ST_HIDDEN void _st_kq_dellist_add(const struct kevent *kev) | ||
731 | -{ | ||
732 | - int n = _st_kq_data->dellist_size; | ||
733 | - | ||
734 | - if (_st_kq_data->dellist_cnt >= n) { | ||
735 | - struct kevent *ptr; | ||
736 | - | ||
737 | - n <<= 1; | ||
738 | - ptr = (struct kevent *)realloc(_st_kq_data->dellist, n * sizeof(struct kevent)); | ||
739 | - if (!ptr) { | ||
740 | - /* See comment in _st_kq_pollset_del() */ | ||
741 | - return; | ||
742 | - } | ||
743 | - | ||
744 | - _st_kq_data->dellist = ptr; | ||
745 | - _st_kq_data->dellist_size = n; | ||
746 | - } | ||
747 | - | ||
748 | - memcpy(_st_kq_data->dellist + _st_kq_data->dellist_cnt, kev, sizeof(struct kevent)); | ||
749 | - _st_kq_data->dellist_cnt++; | ||
750 | -} | ||
751 | - | ||
752 | -ST_HIDDEN int _st_kq_pollset_add(struct pollfd *pds, int npds) | ||
753 | -{ | ||
754 | - struct kevent kev; | ||
755 | - struct pollfd *pd; | ||
756 | - struct pollfd *epd = pds + npds; | ||
757 | - | ||
758 | - /* | ||
759 | - * Pollset adding is "atomic". That is, either it succeeded for | ||
760 | - * all descriptors in the set or it failed. It means that we | ||
761 | - * need to do all the checks up front so we don't have to | ||
762 | - * "unwind" if adding of one of the descriptors failed. | ||
763 | - */ | ||
764 | - for (pd = pds; pd < epd; pd++) { | ||
765 | - /* POLLIN and/or POLLOUT must be set, but nothing else */ | ||
766 | - if (pd->fd < 0 || !pd->events || (pd->events & ~(POLLIN | POLLOUT))) { | ||
767 | - errno = EINVAL; | ||
768 | - return -1; | ||
769 | - } | ||
770 | - if (pd->fd >= _st_kq_data->fd_data_size && _st_kq_fd_data_expand(pd->fd) < 0) { | ||
771 | - return -1; | ||
772 | - } | ||
773 | - } | ||
774 | - | ||
775 | - /* | ||
776 | - * Make sure we have enough room in the addlist for twice as many | ||
777 | - * descriptors as in the pollset (for both READ and WRITE filters). | ||
778 | - */ | ||
779 | - npds <<= 1; | ||
780 | - if (npds > _st_kq_data->addlist_size - _st_kq_data->addlist_cnt && _st_kq_addlist_expand(npds) < 0) { | ||
781 | - return -1; | ||
782 | - } | ||
783 | - | ||
784 | - for (pd = pds; pd < epd; pd++) { | ||
785 | - if ((pd->events & POLLIN) && (_ST_KQ_READ_CNT(pd->fd)++ == 0)) { | ||
786 | - memset(&kev, 0, sizeof(kev)); | ||
787 | - kev.ident = pd->fd; | ||
788 | - kev.filter = EVFILT_READ; | ||
789 | -#ifdef NOTE_EOF | ||
790 | - /* Make it behave like select() and poll() */ | ||
791 | - kev.fflags = NOTE_EOF; | ||
792 | -#endif | ||
793 | - kev.flags = (EV_ADD | EV_ONESHOT); | ||
794 | - _st_kq_addlist_add(&kev); | ||
795 | - } | ||
796 | - if ((pd->events & POLLOUT) && (_ST_KQ_WRITE_CNT(pd->fd)++ == 0)) { | ||
797 | - memset(&kev, 0, sizeof(kev)); | ||
798 | - kev.ident = pd->fd; | ||
799 | - kev.filter = EVFILT_WRITE; | ||
800 | - kev.flags = (EV_ADD | EV_ONESHOT); | ||
801 | - _st_kq_addlist_add(&kev); | ||
802 | - } | ||
803 | - } | ||
804 | - | ||
805 | - return 0; | ||
806 | -} | ||
807 | - | ||
808 | -ST_HIDDEN void _st_kq_pollset_del(struct pollfd *pds, int npds) | ||
809 | -{ | ||
810 | - struct kevent kev; | ||
811 | - struct pollfd *pd; | ||
812 | - struct pollfd *epd = pds + npds; | ||
813 | - | ||
814 | - /* | ||
815 | - * It's OK if deleting fails because a descriptor will either be | ||
816 | - * closed or fire only once (we set EV_ONESHOT flag). | ||
817 | - */ | ||
818 | - _st_kq_data->dellist_cnt = 0; | ||
819 | - for (pd = pds; pd < epd; pd++) { | ||
820 | - if ((pd->events & POLLIN) && (--_ST_KQ_READ_CNT(pd->fd) == 0)) { | ||
821 | - memset(&kev, 0, sizeof(kev)); | ||
822 | - kev.ident = pd->fd; | ||
823 | - kev.filter = EVFILT_READ; | ||
824 | - kev.flags = EV_DELETE; | ||
825 | - _st_kq_dellist_add(&kev); | ||
826 | - } | ||
827 | - if ((pd->events & POLLOUT) && (--_ST_KQ_WRITE_CNT(pd->fd) == 0)) { | ||
828 | - memset(&kev, 0, sizeof(kev)); | ||
829 | - kev.ident = pd->fd; | ||
830 | - kev.filter = EVFILT_WRITE; | ||
831 | - kev.flags = EV_DELETE; | ||
832 | - _st_kq_dellist_add(&kev); | ||
833 | - } | ||
834 | - } | ||
835 | - | ||
836 | - if (_st_kq_data->dellist_cnt > 0) { | ||
837 | - /* | ||
838 | - * We do "synchronous" kqueue deletes to avoid deleting | ||
839 | - * closed descriptors and other possible problems. | ||
840 | - */ | ||
841 | - int rv; | ||
842 | - do { | ||
843 | - /* This kevent() won't block since result list size is 0 */ | ||
844 | - rv = kevent(_st_kq_data->kq, _st_kq_data->dellist, _st_kq_data->dellist_cnt, NULL, 0, NULL); | ||
845 | - } while (rv < 0 && errno == EINTR); | ||
846 | - } | ||
847 | -} | ||
848 | - | ||
849 | -ST_HIDDEN void _st_kq_dispatch(void) | ||
850 | -{ | ||
851 | - struct timespec timeout, *tsp; | ||
852 | - struct kevent kev; | ||
853 | - st_utime_t min_timeout; | ||
854 | - _st_clist_t *q; | ||
855 | - _st_pollq_t *pq; | ||
856 | - struct pollfd *pds, *epds; | ||
857 | - int nfd, i, osfd, notify, filter; | ||
858 | - short events, revents; | ||
859 | - | ||
860 | - if (_ST_SLEEPQ == NULL) { | ||
861 | - tsp = NULL; | ||
862 | - } else { | ||
863 | - min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 : (_ST_SLEEPQ->due - _ST_LAST_CLOCK); | ||
864 | - timeout.tv_sec = (time_t) (min_timeout / 1000000); | ||
865 | - timeout.tv_nsec = (long) ((min_timeout % 1000000) * 1000); | ||
866 | - tsp = &timeout; | ||
867 | - } | ||
868 | - | ||
869 | - retry_kevent: | ||
870 | - /* Check for I/O operations */ | ||
871 | - nfd = kevent(_st_kq_data->kq, _st_kq_data->addlist, _st_kq_data->addlist_cnt, | ||
872 | - _st_kq_data->evtlist, _st_kq_data->evtlist_size, tsp); | ||
873 | - | ||
874 | - _st_kq_data->addlist_cnt = 0; | ||
875 | - | ||
876 | - if (nfd > 0) { | ||
877 | - for (i = 0; i < nfd; i++) { | ||
878 | - osfd = _st_kq_data->evtlist[i].ident; | ||
879 | - filter = _st_kq_data->evtlist[i].filter; | ||
880 | - | ||
881 | - if (filter == EVFILT_READ) { | ||
882 | - _ST_KQ_REVENTS(osfd) |= POLLIN; | ||
883 | - } else if (filter == EVFILT_WRITE) { | ||
884 | - _ST_KQ_REVENTS(osfd) |= POLLOUT; | ||
885 | - } | ||
886 | - if (_st_kq_data->evtlist[i].flags & EV_ERROR) { | ||
887 | - if (_st_kq_data->evtlist[i].data == EBADF) { | ||
888 | - _ST_KQ_REVENTS(osfd) |= POLLNVAL; | ||
889 | - } else { | ||
890 | - _ST_KQ_REVENTS(osfd) |= POLLERR; | ||
891 | - } | ||
892 | - } | ||
893 | - } | ||
894 | - | ||
895 | - _st_kq_data->dellist_cnt = 0; | ||
896 | - | ||
897 | - for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) { | ||
898 | - pq = _ST_POLLQUEUE_PTR(q); | ||
899 | - notify = 0; | ||
900 | - epds = pq->pds + pq->npds; | ||
901 | - | ||
902 | - for (pds = pq->pds; pds < epds; pds++) { | ||
903 | - osfd = pds->fd; | ||
904 | - events = pds->events; | ||
905 | - revents = (short)(_ST_KQ_REVENTS(osfd) & ~(POLLIN | POLLOUT)); | ||
906 | - if ((events & POLLIN) && (_ST_KQ_REVENTS(osfd) & POLLIN)) { | ||
907 | - revents |= POLLIN; | ||
908 | - } | ||
909 | - if ((events & POLLOUT) && (_ST_KQ_REVENTS(osfd) & POLLOUT)) { | ||
910 | - revents |= POLLOUT; | ||
911 | - } | ||
912 | - pds->revents = revents; | ||
913 | - if (revents) { | ||
914 | - notify = 1; | ||
915 | - } | ||
916 | - } | ||
917 | - if (notify) { | ||
918 | - ST_REMOVE_LINK(&pq->links); | ||
919 | - pq->on_ioq = 0; | ||
920 | - for (pds = pq->pds; pds < epds; pds++) { | ||
921 | - osfd = pds->fd; | ||
922 | - events = pds->events; | ||
923 | - /* | ||
924 | - * We set EV_ONESHOT flag so we only need to delete | ||
925 | - * descriptor if it didn't fire. | ||
926 | - */ | ||
927 | - if ((events & POLLIN) && (--_ST_KQ_READ_CNT(osfd) == 0) && ((_ST_KQ_REVENTS(osfd) & POLLIN) == 0)) { | ||
928 | - memset(&kev, 0, sizeof(kev)); | ||
929 | - kev.ident = osfd; | ||
930 | - kev.filter = EVFILT_READ; | ||
931 | - kev.flags = EV_DELETE; | ||
932 | - _st_kq_dellist_add(&kev); | ||
933 | - } | ||
934 | - if ((events & POLLOUT) && (--_ST_KQ_WRITE_CNT(osfd) == 0) && ((_ST_KQ_REVENTS(osfd) & POLLOUT) == 0)) { | ||
935 | - memset(&kev, 0, sizeof(kev)); | ||
936 | - kev.ident = osfd; | ||
937 | - kev.filter = EVFILT_WRITE; | ||
938 | - kev.flags = EV_DELETE; | ||
939 | - _st_kq_dellist_add(&kev); | ||
940 | - } | ||
941 | - } | ||
942 | - | ||
943 | - if (pq->thread->flags & _ST_FL_ON_SLEEPQ) { | ||
944 | - _ST_DEL_SLEEPQ(pq->thread); | ||
945 | - } | ||
946 | - pq->thread->state = _ST_ST_RUNNABLE; | ||
947 | - _ST_ADD_RUNQ(pq->thread); | ||
948 | - } | ||
949 | - } | ||
950 | - | ||
951 | - if (_st_kq_data->dellist_cnt > 0) { | ||
952 | - int rv; | ||
953 | - do { | ||
954 | - /* This kevent() won't block since result list size is 0 */ | ||
955 | - rv = kevent(_st_kq_data->kq, _st_kq_data->dellist, _st_kq_data->dellist_cnt, NULL, 0, NULL); | ||
956 | - } while (rv < 0 && errno == EINTR); | ||
957 | - } | ||
958 | - | ||
959 | - for (i = 0; i < nfd; i++) { | ||
960 | - osfd = _st_kq_data->evtlist[i].ident; | ||
961 | - _ST_KQ_REVENTS(osfd) = 0; | ||
962 | - } | ||
963 | - } else if (nfd < 0) { | ||
964 | - if (errno == EBADF && _st_kq_data->pid != getpid()) { | ||
965 | - /* We probably forked, reinitialize kqueue */ | ||
966 | - if ((_st_kq_data->kq = kqueue()) < 0) { | ||
967 | - /* There is nothing we can do here, will retry later */ | ||
968 | - return; | ||
969 | - } | ||
970 | - fcntl(_st_kq_data->kq, F_SETFD, FD_CLOEXEC); | ||
971 | - _st_kq_data->pid = getpid(); | ||
972 | - /* Re-register all descriptors on ioq with new kqueue */ | ||
973 | - memset(_st_kq_data->fd_data, 0, _st_kq_data->fd_data_size * sizeof(_kq_fd_data_t)); | ||
974 | - for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) { | ||
975 | - pq = _ST_POLLQUEUE_PTR(q); | ||
976 | - _st_kq_pollset_add(pq->pds, pq->npds); | ||
977 | - } | ||
978 | - goto retry_kevent; | ||
979 | - } | ||
980 | - } | ||
981 | -} | ||
982 | - | ||
983 | -ST_HIDDEN int _st_kq_fd_new(int osfd) | ||
984 | -{ | ||
985 | - if (osfd >= _st_kq_data->fd_data_size && _st_kq_fd_data_expand(osfd) < 0) { | ||
986 | - return -1; | ||
987 | - } | ||
988 | - | ||
989 | - return 0; | ||
990 | -} | ||
991 | - | ||
992 | -ST_HIDDEN int _st_kq_fd_close(int osfd) | ||
993 | -{ | ||
994 | - if (_ST_KQ_READ_CNT(osfd) || _ST_KQ_WRITE_CNT(osfd)) { | ||
995 | - errno = EBUSY; | ||
996 | - return -1; | ||
997 | - } | ||
998 | - | ||
999 | - return 0; | ||
1000 | -} | ||
1001 | - | ||
1002 | -ST_HIDDEN int _st_kq_fd_getlimit(void) | ||
1003 | -{ | ||
1004 | - /* zero means no specific limit */ | ||
1005 | - return 0; | ||
1006 | -} | ||
1007 | - | ||
1008 | -static _st_eventsys_t _st_kq_eventsys = { | ||
1009 | - "kqueue", | ||
1010 | - ST_EVENTSYS_ALT, | ||
1011 | - _st_kq_init, | ||
1012 | - _st_kq_dispatch, | ||
1013 | - _st_kq_pollset_add, | ||
1014 | - _st_kq_pollset_del, | ||
1015 | - _st_kq_fd_new, | ||
1016 | - _st_kq_fd_close, | ||
1017 | - _st_kq_fd_getlimit | ||
1018 | -}; | ||
1019 | -#endif /* MD_HAVE_KQUEUE */ | ||
1020 | - | ||
1021 | -#ifdef MD_HAVE_EPOLL | ||
1022 | -/***************************************** | ||
1023 | * epoll event system | 95 | * epoll event system |
1024 | */ | 96 | */ |
1025 | 97 | ||
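
The hunks above delete the select, poll, and kqueue backends outright; the epoll implementation that follows (largely unchanged, so mostly elided from this diff) is the only one left. As a rough sketch of the translation that backend has to perform (assumed shape, not this file's verbatim code; the helper name is made up), events reported by epoll_wait(2) are mapped back into poll(2)-style revents for the threads waiting on the I/O queue:

#include <stdint.h>
#include <poll.h>
#include <sys/epoll.h>

/* Sketch: convert an epoll event mask into the poll(2) revents bits
 * that get stored back into each waiting thread's pollfd array. */
static short example_epoll_to_revents(uint32_t events)
{
    short revents = 0;
    if (events & EPOLLIN)
        revents |= POLLIN;
    if (events & EPOLLOUT)
        revents |= POLLOUT;
    if (events & EPOLLPRI)
        revents |= POLLPRI;
    if (events & EPOLLERR)
        revents |= POLLERR;   /* surfaced so blocked threads can fail fast */
    if (events & EPOLLHUP)
        revents |= POLLHUP;
    return revents;
}
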
@@ -1386,8 +458,6 @@ static _st_eventsys_t _st_epoll_eventsys = {
1386 | _st_epoll_fd_close, | 458 | _st_epoll_fd_close, |
1387 | _st_epoll_fd_getlimit | 459 | _st_epoll_fd_getlimit |
1388 | }; | 460 | }; |
1389 | -#endif /* MD_HAVE_EPOLL */ | ||
1390 | - | ||
1391 | 461 | ||
1392 | /***************************************** | 462 | /***************************************** |
1393 | * Public functions | 463 | * Public functions |
@@ -1402,30 +472,12 @@ int st_set_eventsys(int eventsys)
1402 | 472 | ||
1403 | switch (eventsys) { | 473 | switch (eventsys) { |
1404 | case ST_EVENTSYS_DEFAULT: | 474 | case ST_EVENTSYS_DEFAULT: |
1405 | -#ifdef USE_POLL | ||
1406 | - _st_eventsys = &_st_poll_eventsys; | ||
1407 | -#else | ||
1408 | - _st_eventsys = &_st_select_eventsys; | ||
1409 | -#endif | ||
1410 | - break; | ||
1411 | - case ST_EVENTSYS_SELECT: | ||
1412 | - _st_eventsys = &_st_select_eventsys; | ||
1413 | - break; | ||
1414 | -#ifdef MD_HAVE_POLL | ||
1415 | - case ST_EVENTSYS_POLL: | ||
1416 | - _st_eventsys = &_st_poll_eventsys; | ||
1417 | - break; | ||
1418 | -#endif | ||
1419 | case ST_EVENTSYS_ALT: | 475 | case ST_EVENTSYS_ALT: |
1420 | -#if defined (MD_HAVE_KQUEUE) | ||
1421 | - _st_eventsys = &_st_kq_eventsys; | ||
1422 | -#elif defined (MD_HAVE_EPOLL) | 476 | + default: |
1423 | if (_st_epoll_is_supported()) { | 477 | if (_st_epoll_is_supported()) { |
1424 | _st_eventsys = &_st_epoll_eventsys; | 478 | _st_eventsys = &_st_epoll_eventsys; |
479 | + break; | ||
1425 | } | 480 | } |
1426 | -#endif | ||
1427 | - break; | ||
1428 | - default: | ||
1429 | errno = EINVAL; | 481 | errno = EINVAL; |
1430 | return -1; | 482 | return -1; |
1431 | } | 483 | } |
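
After this change both ST_EVENTSYS_DEFAULT and ST_EVENTSYS_ALT resolve to the epoll backend, and the call fails with EINVAL only when _st_epoll_is_supported() reports that epoll is unavailable at runtime. A minimal application-side usage sketch, assuming the standard public st.h API:

#include <stdio.h>
#include "st.h"

int main(void)
{
    /* Must be called before st_init(); either constant now selects epoll. */
    if (st_set_eventsys(ST_EVENTSYS_ALT) < 0) {
        perror("st_set_eventsys");   /* EINVAL if epoll is unavailable */
        return 1;
    }
    if (st_init() < 0) {
        perror("st_init");
        return 1;
    }
    printf("event system: %s\n", st_get_eventsys_name());
    return 0;
}
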
@@ -177,10 +177,6 @@
177 | /***************************************** | 177 | /***************************************** |
178 | * Other defines | 178 | * Other defines |
179 | */ | 179 | */ |
180 | -#if !defined(MD_HAVE_POLL) && !defined(MD_DONT_HAVE_POLL) | ||
181 | - #define MD_HAVE_POLL | ||
182 | -#endif | ||
183 | - | ||
184 | #ifndef MD_STACK_PAD_SIZE | 180 | #ifndef MD_STACK_PAD_SIZE |
185 | #define MD_STACK_PAD_SIZE 128 | 181 | #define MD_STACK_PAD_SIZE 128 |
186 | #endif | 182 | #endif |