winlin

Merge branch 'srs.master'

1 -/*  
2 - * The contents of this file are subject to the Mozilla Public  
3 - * License Version 1.1 (the "License"); you may not use this file  
4 - * except in compliance with the License. You may obtain a copy of  
5 - * the License at http://www.mozilla.org/MPL/  
6 - *  
7 - * Software distributed under the License is distributed on an "AS  
8 - * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or  
9 - * implied. See the License for the specific language governing  
10 - * rights and limitations under the License.  
11 - *  
12 - * The Original Code is the Netscape Portable Runtime library.  
13 - *  
14 - * The Initial Developer of the Original Code is Netscape  
15 - * Communications Corporation. Portions created by Netscape are  
16 - * Copyright (C) 1994-2000 Netscape Communications Corporation. All  
17 - * Rights Reserved.  
18 - *  
19 - * Contributor(s): Silicon Graphics, Inc.  
20 - *  
21 - * Portions created by SGI are Copyright (C) 2000-2001 Silicon  
22 - * Graphics, Inc. All Rights Reserved.  
23 - *  
24 - * Alternatively, the contents of this file may be used under the  
25 - * terms of the GNU General Public License Version 2 or later (the  
26 - * "GPL"), in which case the provisions of the GPL are applicable  
27 - * instead of those above. If you wish to allow use of your  
28 - * version of this file only under the terms of the GPL and not to  
29 - * allow others to use your version of this file under the MPL,  
30 - * indicate your decision by deleting the provisions above and  
31 - * replace them with the notice and other provisions required by  
32 - * the GPL. If you do not delete the provisions above, a recipient  
33 - * may use your version of this file under either the MPL or the  
34 - * GPL.  
35 - */  
36 -  
37 -/*  
38 - * This file is derived directly from Netscape Communications Corporation,  
39 - * and consists of extensive modifications made during the year(s) 1999-2000.  
40 - */  
41 -  
42 -#include <stdlib.h>  
43 -#include <unistd.h>  
44 -#include <fcntl.h>  
45 -#include <string.h>  
46 -#include <time.h>  
47 -#include <errno.h>  
48 -#include "common.h"  
49 -  
50 -/* Global data */  
51 -_st_vp_t _st_this_vp; /* This VP */  
52 -_st_thread_t *_st_this_thread; /* Current thread */  
53 -int _st_active_count = 0; /* Active thread count */  
54 -  
55 -time_t _st_curr_time = 0; /* Current time as returned by time(2) */  
56 -st_utime_t _st_last_tset; /* Last time it was fetched */  
57 -  
58 -int st_poll(struct pollfd *pds, int npds, st_utime_t timeout)  
59 -{  
60 - struct pollfd *pd;  
61 - struct pollfd *epd = pds + npds;  
62 - _st_pollq_t pq;  
63 - _st_thread_t *me = _ST_CURRENT_THREAD();  
64 - int n;  
65 -  
66 - if (me->flags & _ST_FL_INTERRUPT) {  
67 - me->flags &= ~_ST_FL_INTERRUPT;  
68 - errno = EINTR;  
69 - return -1;  
70 - }  
71 -  
72 - if ((*_st_eventsys->pollset_add)(pds, npds) < 0) {  
73 - return -1;  
74 - }  
75 -  
76 - pq.pds = pds;  
77 - pq.npds = npds;  
78 - pq.thread = me;  
79 - pq.on_ioq = 1;  
80 - _ST_ADD_IOQ(pq);  
81 - if (timeout != ST_UTIME_NO_TIMEOUT) {  
82 - _ST_ADD_SLEEPQ(me, timeout);  
83 - }  
84 - me->state = _ST_ST_IO_WAIT;  
85 -  
86 - _ST_SWITCH_CONTEXT(me);  
87 -  
88 - n = 0;  
89 - if (pq.on_ioq) {  
90 - /* If we timed out, the pollq might still be on the ioq. Remove it */  
91 - _ST_DEL_IOQ(pq);  
92 - (*_st_eventsys->pollset_del)(pds, npds);  
93 - } else {  
94 - /* Count the number of ready descriptors */  
95 - for (pd = pds; pd < epd; pd++) {  
96 - if (pd->revents) {  
97 - n++;  
98 - }  
99 - }  
100 - }  
101 -  
102 - if (me->flags & _ST_FL_INTERRUPT) {  
103 - me->flags &= ~_ST_FL_INTERRUPT;  
104 - errno = EINTR;  
105 - return -1;  
106 - }  
107 -  
108 - return n;  
109 -}  
110 -  
111 -void _st_vp_schedule(void)  
112 -{  
113 - _st_thread_t *trd;  
114 -  
115 - if (_ST_RUNQ.next != &_ST_RUNQ) {  
116 - /* Pull thread off of the run queue */  
117 - trd = _ST_THREAD_PTR(_ST_RUNQ.next);  
118 - _ST_DEL_RUNQ(trd);  
119 - } else {  
120 - /* If there are no threads to run, switch to the idle thread */  
121 - trd = _st_this_vp.idle_thread;  
122 - }  
123 - ST_ASSERT(trd->state == _ST_ST_RUNNABLE);  
124 -  
125 - /* Resume the thread */  
126 - trd->state = _ST_ST_RUNNING;  
127 - _ST_RESTORE_CONTEXT(trd);  
128 -}  
129 -  
130 -/*  
131 - * Initialize this Virtual Processor  
132 - */  
133 -int st_init(void)  
134 -{  
135 - _st_thread_t *trd;  
136 -  
137 - if (_st_active_count) {  
138 - /* Already initialized */  
139 - return 0;  
140 - }  
141 -  
142 - /* We can ignore return value here */  
143 - st_set_eventsys(ST_EVENTSYS_DEFAULT);  
144 -  
145 - if (_st_io_init() < 0) {  
146 - return -1;  
147 - }  
148 -  
149 - memset(&_st_this_vp, 0, sizeof(_st_vp_t));  
150 -  
151 - ST_INIT_CLIST(&_ST_RUNQ);  
152 - ST_INIT_CLIST(&_ST_IOQ);  
153 - ST_INIT_CLIST(&_ST_ZOMBIEQ);  
154 -#ifdef DEBUG  
155 - ST_INIT_CLIST(&_ST_THREADQ);  
156 -#endif  
157 -  
158 - if ((*_st_eventsys->init)() < 0) {  
159 - return -1;  
160 - }  
161 -  
162 - _st_this_vp.pagesize = getpagesize();  
163 - _st_this_vp.last_clock = st_utime();  
164 -  
165 - /*  
166 - * Create idle thread  
167 - */  
168 - _st_this_vp.idle_thread = st_thread_create(_st_idle_thread_start, NULL, 0, 0);  
169 - if (!_st_this_vp.idle_thread) {  
170 - return -1;  
171 - }  
172 - _st_this_vp.idle_thread->flags = _ST_FL_IDLE_THREAD;  
173 - _st_active_count--;  
174 - _ST_DEL_RUNQ(_st_this_vp.idle_thread);  
175 -  
176 - /*  
177 - * Initialize primordial thread  
178 - */  
179 - trd = (_st_thread_t *) calloc(1, sizeof(_st_thread_t) +  
180 - (ST_KEYS_MAX * sizeof(void *)));  
181 - if (!trd) {  
182 - return -1;  
183 - }  
184 - trd->private_data = (void **) (trd + 1);  
185 - trd->state = _ST_ST_RUNNING;  
186 - trd->flags = _ST_FL_PRIMORDIAL;  
187 - _ST_SET_CURRENT_THREAD(trd);  
188 - _st_active_count++;  
189 -#ifdef DEBUG  
190 - _ST_ADD_THREADQ(trd);  
191 -#endif  
192 -  
193 - return 0;  
194 -}  
195 -  
196 -#ifdef ST_SWITCH_CB  
197 -st_switch_cb_t st_set_switch_in_cb(st_switch_cb_t cb)  
198 -{  
199 - st_switch_cb_t ocb = _st_this_vp.switch_in_cb;  
200 - _st_this_vp.switch_in_cb = cb;  
201 - return ocb;  
202 -}  
203 -  
204 -st_switch_cb_t st_set_switch_out_cb(st_switch_cb_t cb)  
205 -{  
206 - st_switch_cb_t ocb = _st_this_vp.switch_out_cb;  
207 - _st_this_vp.switch_out_cb = cb;  
208 - return ocb;  
209 -}  
210 -#endif  
211 -  
212 -/*  
213 - * Start function for the idle thread  
214 - */  
215 -/* ARGSUSED */  
216 -void *_st_idle_thread_start(void *arg)  
217 -{  
218 - _st_thread_t *me = _ST_CURRENT_THREAD();  
219 -  
220 - while (_st_active_count > 0) {  
221 - /* Idle vp till I/O is ready or the smallest timeout expired */  
222 - _ST_VP_IDLE();  
223 -  
224 - /* Check sleep queue for expired threads */  
225 - _st_vp_check_clock();  
226 -  
227 - me->state = _ST_ST_RUNNABLE;  
228 - _ST_SWITCH_CONTEXT(me);  
229 - }  
230 -  
231 - /* No more threads */  
232 - exit(0);  
233 -  
234 - /* NOTREACHED */  
235 - return NULL;  
236 -}  
237 -  
238 -void st_thread_exit(void *retval)  
239 -{  
240 - _st_thread_t *trd = _ST_CURRENT_THREAD();  
241 -  
242 - trd->retval = retval;  
243 - _st_thread_cleanup(trd);  
244 - _st_active_count--;  
245 - if (trd->term) {  
246 - /* Put thread on the zombie queue */  
247 - trd->state = _ST_ST_ZOMBIE;  
248 - _ST_ADD_ZOMBIEQ(trd);  
249 -  
250 - /* Notify on our termination condition variable */  
251 - st_cond_signal(trd->term);  
252 -  
253 - /* Switch context and come back later */  
254 - _ST_SWITCH_CONTEXT(trd);  
255 -  
256 - /* Continue the cleanup */  
257 - st_cond_destroy(trd->term);  
258 - trd->term = NULL;  
259 - }  
260 -  
261 -#ifdef DEBUG  
262 - _ST_DEL_THREADQ(trd);  
263 -#endif  
264 -  
265 - if (!(trd->flags & _ST_FL_PRIMORDIAL)) {  
266 - _st_stack_free(trd->stack);  
267 - }  
268 -  
269 - /* Find another thread to run */  
270 - _ST_SWITCH_CONTEXT(trd);  
271 - /* Not going to land here */  
272 -}  
273 -  
274 -int st_thread_join(_st_thread_t *trd, void **retvalp)  
275 -{  
276 - _st_cond_t *term = trd->term;  
277 -  
278 - /* Can't join a non-joinable thread */  
279 - if (term == NULL) {  
280 - errno = EINVAL;  
281 - return -1;  
282 - }  
283 - if (_ST_CURRENT_THREAD() == trd) {  
284 - errno = EDEADLK;  
285 - return -1;  
286 - }  
287 -  
288 - /* Multiple threads can't wait on the same joinable thread */  
289 - if (term->wait_q.next != &term->wait_q) {  
290 - errno = EINVAL;  
291 - return -1;  
292 - }  
293 -  
294 - while (trd->state != _ST_ST_ZOMBIE) {  
295 - if (st_cond_timedwait(term, ST_UTIME_NO_TIMEOUT) != 0) {  
296 - return -1;  
297 - }  
298 - }  
299 -  
300 - if (retvalp) {  
301 - *retvalp = trd->retval;  
302 - }  
303 -  
304 - /*  
305 - * Remove target thread from the zombie queue and make it runnable.  
306 - * When it gets scheduled later, it will do the clean up.  
307 - */  
308 - trd->state = _ST_ST_RUNNABLE;  
309 - _ST_DEL_ZOMBIEQ(trd);  
310 - _ST_ADD_RUNQ(trd);  
311 -  
312 - return 0;  
313 -}  
314 -  
315 -void _st_thread_main(void)  
316 -{  
317 - _st_thread_t *trd = _ST_CURRENT_THREAD();  
318 -  
319 - /*  
320 - * Cap the stack by zeroing out the saved return address register  
321 - * value. This allows some debugging/profiling tools to know when  
322 - * to stop unwinding the stack. It's a no-op on most platforms.  
323 - */  
324 - MD_CAP_STACK(&trd);  
325 -  
326 - /* Run thread main */  
327 - trd->retval = (*trd->start)(trd->arg);  
328 -  
329 - /* All done, time to go away */  
330 - st_thread_exit(trd->retval);  
331 -}  
332 -  
333 -/*  
334 - * Insert "thread" into the timeout heap, in the position  
335 - * specified by thread->heap_index. See docs/timeout_heap.txt  
336 - * for details about the timeout heap.  
337 - */  
338 -static _st_thread_t **heap_insert(_st_thread_t *trd)  
339 -{  
340 - int target = trd->heap_index;  
341 - int s = target;  
342 - _st_thread_t **p = &_ST_SLEEPQ;  
343 - int bits = 0;  
344 - int bit;  
345 - int index = 1;  
346 -  
347 - while (s) {  
348 - s >>= 1;  
349 - bits++;  
350 - }  
351 -  
352 - for (bit = bits - 2; bit >= 0; bit--) {  
353 - if (trd->due < (*p)->due) {  
354 - _st_thread_t *t = *p;  
355 - trd->left = t->left;  
356 - trd->right = t->right;  
357 - *p = trd;  
358 - trd->heap_index = index;  
359 - trd = t;  
360 - }  
361 - index <<= 1;  
362 - if (target & (1 << bit)) {  
363 - p = &((*p)->right);  
364 - index |= 1;  
365 - } else {  
366 - p = &((*p)->left);  
367 - }  
368 - }  
369 -  
370 - trd->heap_index = index;  
371 - *p = trd;  
372 - trd->left = trd->right = NULL;  
373 -  
374 - return p;  
375 -}  
376 -  
377 -/*  
378 - * Delete "thread" from the timeout heap.  
379 - */  
380 -static void heap_delete(_st_thread_t *trd)  
381 -{  
382 - _st_thread_t *t, **p;  
383 - int bits = 0;  
384 - int s, bit;  
385 -  
386 - /* First find and unlink the last heap element */  
387 - p = &_ST_SLEEPQ;  
388 - s = _ST_SLEEPQ_SIZE;  
389 - while (s) {  
390 - s >>= 1;  
391 - bits++;  
392 - }  
393 -  
394 - for (bit = bits - 2; bit >= 0; bit--) {  
395 - if (_ST_SLEEPQ_SIZE & (1 << bit)) {  
396 - p = &((*p)->right);  
397 - } else {  
398 - p = &((*p)->left);  
399 - }  
400 - }  
401 -  
402 - t = *p;  
403 - *p = NULL;  
404 - --_ST_SLEEPQ_SIZE;  
405 - if (t != trd) {  
406 - /*  
407 - * Insert the unlinked last element in place of the element we are deleting  
408 - */  
409 - t->heap_index = trd->heap_index;  
410 - p = heap_insert(t);  
411 - t = *p;  
412 - t->left = trd->left;  
413 - t->right = trd->right;  
414 -  
415 - /*  
416 - * Reestablish the heap invariant.  
417 - */  
418 - for (;;) {  
419 - _st_thread_t *y; /* The younger child */  
420 - int index_tmp;  
421 -  
422 - if (t->left == NULL) {  
423 - break;  
424 - } else if (t->right == NULL) {  
425 - y = t->left;  
426 - } else if (t->left->due < t->right->due) {  
427 - y = t->left;  
428 - } else {  
429 - y = t->right;  
430 - }  
431 -  
432 - if (t->due > y->due) {  
433 - _st_thread_t *tl = y->left;  
434 - _st_thread_t *tr = y->right;  
435 - *p = y;  
436 - if (y == t->left) {  
437 - y->left = t;  
438 - y->right = t->right;  
439 - p = &y->left;  
440 - } else {  
441 - y->left = t->left;  
442 - y->right = t;  
443 - p = &y->right;  
444 - }  
445 - t->left = tl;  
446 - t->right = tr;  
447 - index_tmp = t->heap_index;  
448 - t->heap_index = y->heap_index;  
449 - y->heap_index = index_tmp;  
450 - } else {  
451 - break;  
452 - }  
453 - }  
454 - }  
455 -  
456 - trd->left = trd->right = NULL;  
457 -}  
458 -  
459 -void _st_add_sleep_q(_st_thread_t *trd, st_utime_t timeout)  
460 -{  
461 - trd->due = _ST_LAST_CLOCK + timeout;  
462 - trd->flags |= _ST_FL_ON_SLEEPQ;  
463 - trd->heap_index = ++_ST_SLEEPQ_SIZE;  
464 - heap_insert(trd);  
465 -}  
466 -  
467 -void _st_del_sleep_q(_st_thread_t *trd)  
468 -{  
469 - heap_delete(trd);  
470 - trd->flags &= ~_ST_FL_ON_SLEEPQ;  
471 -}  
472 -  
473 -void _st_vp_check_clock(void)  
474 -{  
475 - _st_thread_t *trd;  
476 - st_utime_t elapsed, now;  
477 -  
478 - now = st_utime();  
479 - elapsed = now - _ST_LAST_CLOCK;  
480 - _ST_LAST_CLOCK = now;  
481 -  
482 - if (_st_curr_time && now - _st_last_tset > 999000) {  
483 - _st_curr_time = time(NULL);  
484 - _st_last_tset = now;  
485 - }  
486 -  
487 - while (_ST_SLEEPQ != NULL) {  
488 - trd = _ST_SLEEPQ;  
489 - ST_ASSERT(trd->flags & _ST_FL_ON_SLEEPQ);  
490 - if (trd->due > now) {  
491 - break;  
492 - }  
493 - _ST_DEL_SLEEPQ(trd);  
494 -  
495 - /* If thread is waiting on condition variable, set the time out flag */  
496 - if (trd->state == _ST_ST_COND_WAIT) {  
497 - trd->flags |= _ST_FL_TIMEDOUT;  
498 - }  
499 -  
500 - /* Make thread runnable */  
501 - ST_ASSERT(!(trd->flags & _ST_FL_IDLE_THREAD));  
502 - trd->state = _ST_ST_RUNNABLE;  
503 - _ST_ADD_RUNQ(trd);  
504 - }  
505 -}  
506 -  
507 -void st_thread_interrupt(_st_thread_t* trd)  
508 -{  
509 - /* If thread is already dead */  
510 - if (trd->state == _ST_ST_ZOMBIE) {  
511 - return;  
512 - }  
513 -  
514 - trd->flags |= _ST_FL_INTERRUPT;  
515 -  
516 - if (trd->state == _ST_ST_RUNNING || trd->state == _ST_ST_RUNNABLE) {  
517 - return;  
518 - }  
519 -  
520 - if (trd->flags & _ST_FL_ON_SLEEPQ) {  
521 - _ST_DEL_SLEEPQ(trd);  
522 - }  
523 -  
524 - /* Make thread runnable */  
525 - trd->state = _ST_ST_RUNNABLE;  
526 - _ST_ADD_RUNQ(trd);  
527 -}  
528 -  
529 -_st_thread_t *st_thread_create(void *(*start)(void *arg), void *arg, int joinable, int stk_size)  
530 -{  
531 - _st_thread_t *trd;  
532 - _st_stack_t *stack;  
533 - void **ptds;  
534 - char *sp;  
535 -  
536 - /* Adjust stack size */  
537 - if (stk_size == 0) {  
538 - stk_size = ST_DEFAULT_STACK_SIZE;  
539 - }  
540 - stk_size = ((stk_size + _ST_PAGE_SIZE - 1) / _ST_PAGE_SIZE) * _ST_PAGE_SIZE;  
541 - stack = _st_stack_new(stk_size);  
542 - if (!stack) {  
543 - return NULL;  
544 - }  
545 -  
546 - /* Allocate thread object and per-thread data off the stack */  
547 -#if defined (MD_STACK_GROWS_DOWN)  
548 - sp = stack->stk_top;  
549 - /*  
550 - * The stack segment is split in the middle. The upper half is used  
551 - * as backing store for the register stack which grows upward.  
552 - * The lower half is used for the traditional memory stack which  
553 - * grows downward. Both stacks start in the middle and grow outward  
554 - * from each other.  
555 - */  
556 - /**  
557 - The below comments is by winlin:  
558 - The Stack public structure:  
559 - +--------------------------------------------------------------+  
560 - | stack |  
561 - +--------------------------------------------------------------+  
562 - bottom top  
563 - The code bellow use the stack as:  
564 - +-----------------+-----------------+-------------+------------+  
565 - | stack of thread |pad+align(128B+) |thread(336B) | keys(128B) |  
566 - +-----------------+-----------------+-------------+------------+  
567 - bottom sp trd ptds top  
568 - (context[0].__jmpbuf.sp) (private_data)  
569 - */  
570 - sp = sp - (ST_KEYS_MAX * sizeof(void *));  
571 - ptds = (void **) sp;  
572 - sp = sp - sizeof(_st_thread_t);  
573 - trd = (_st_thread_t *) sp;  
574 -  
575 - /* Make stack 64-byte aligned */  
576 - if ((unsigned long)sp & 0x3f) {  
577 - sp = sp - ((unsigned long)sp & 0x3f);  
578 - }  
579 - stack->sp = sp - _ST_STACK_PAD_SIZE;  
580 -#else  
581 - #error "Only Supports Stack Grown Down"  
582 -#endif  
583 -  
584 - memset(trd, 0, sizeof(_st_thread_t));  
585 - memset(ptds, 0, ST_KEYS_MAX * sizeof(void *));  
586 -  
587 - /* Initialize thread */  
588 - trd->private_data = ptds;  
589 - trd->stack = stack;  
590 - trd->start = start;  
591 - trd->arg = arg;  
592 -  
593 -// by winlin, expand macro MD_INIT_CONTEXT  
594 -#if defined(__mips__)  
595 - MD_SETJMP((trd)->context);  
596 - trd->context[0].__jmpbuf[0].__pc = (__ptr_t) _st_thread_main;  
597 - trd->context[0].__jmpbuf[0].__sp = stack->sp;  
598 -#else  
599 - int ret_setjmp = 0;  
600 - if ((ret_setjmp = MD_SETJMP((trd)->context)) != 0) {  
601 - _st_thread_main();  
602 - }  
603 - MD_GET_SP(trd) = (long) (stack->sp);  
604 -#endif  
605 -  
606 - /* If thread is joinable, allocate a termination condition variable */  
607 - if (joinable) {  
608 - trd->term = st_cond_new();  
609 - if (trd->term == NULL) {  
610 - _st_stack_free(trd->stack);  
611 - return NULL;  
612 - }  
613 - }  
614 -  
615 - /* Make thread runnable */  
616 - trd->state = _ST_ST_RUNNABLE;  
617 - _st_active_count++;  
618 - _ST_ADD_RUNQ(trd);  
619 -#ifdef DEBUG  
620 - _ST_ADD_THREADQ(trd);  
621 -#endif  
622 -  
623 - return trd;  
624 -}  
625 -  
626 -_st_thread_t *st_thread_self(void)  
627 -{  
628 - return _ST_CURRENT_THREAD();  
629 -}  
630 -  
631 -#ifdef DEBUG  
632 -/* ARGSUSED */  
633 -void _st_show_thread_stack(_st_thread_t *trd, const char *messg)  
634 -{  
635 -}  
636 -  
637 -/* To be set from debugger */  
638 -int _st_iterate_threads_flag = 0;  
639 -  
640 -void _st_iterate_threads(void)  
641 -{  
642 - static _st_thread_t *trd = NULL;  
643 - static jmp_buf orig_jb, save_jb;  
644 - _st_clist_t *q;  
645 -  
646 - if (!_st_iterate_threads_flag) {  
647 - if (trd) {  
648 - memcpy(trd->context, save_jb, sizeof(jmp_buf));  
649 - MD_LONGJMP(orig_jb, 1);  
650 - }  
651 - return;  
652 - }  
653 -  
654 - if (trd) {  
655 - memcpy(trd->context, save_jb, sizeof(jmp_buf));  
656 - _st_show_thread_stack(trd, NULL);  
657 - } else {  
658 - if (MD_SETJMP(orig_jb)) {  
659 - _st_iterate_threads_flag = 0;  
660 - trd = NULL;  
661 - _st_show_thread_stack(trd, "Iteration completed");  
662 - return;  
663 - }  
664 - trd = _ST_CURRENT_THREAD();  
665 - _st_show_thread_stack(trd, "Iteration started");  
666 - }  
667 -  
668 - q = trd->tlink.next;  
669 - if (q == &_ST_THREADQ) {  
670 - q = q->next;  
671 - }  
672 - ST_ASSERT(q != &_ST_THREADQ);  
673 - trd = _ST_THREAD_THREADQ_PTR(q);  
674 - if (trd == _ST_CURRENT_THREAD()) {  
675 - MD_LONGJMP(orig_jb, 1);  
676 - }  
677 - memcpy(save_jb, trd->context, sizeof(jmp_buf));  
678 - MD_LONGJMP(trd->context, 1);  
679 -}  
680 -#endif /* DEBUG */  
681 -
  1 +/*
  2 + * The contents of this file are subject to the Mozilla Public
  3 + * License Version 1.1 (the "License"); you may not use this file
  4 + * except in compliance with the License. You may obtain a copy of
  5 + * the License at http://www.mozilla.org/MPL/
  6 + *
  7 + * Software distributed under the License is distributed on an "AS
  8 + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
  9 + * implied. See the License for the specific language governing
  10 + * rights and limitations under the License.
  11 + *
  12 + * The Original Code is the Netscape Portable Runtime library.
  13 + *
  14 + * The Initial Developer of the Original Code is Netscape
  15 + * Communications Corporation. Portions created by Netscape are
  16 + * Copyright (C) 1994-2000 Netscape Communications Corporation. All
  17 + * Rights Reserved.
  18 + *
  19 + * Contributor(s): Silicon Graphics, Inc.
  20 + *
  21 + * Portions created by SGI are Copyright (C) 2000-2001 Silicon
  22 + * Graphics, Inc. All Rights Reserved.
  23 + *
  24 + * Alternatively, the contents of this file may be used under the
  25 + * terms of the GNU General Public License Version 2 or later (the
  26 + * "GPL"), in which case the provisions of the GPL are applicable
  27 + * instead of those above. If you wish to allow use of your
  28 + * version of this file only under the terms of the GPL and not to
  29 + * allow others to use your version of this file under the MPL,
  30 + * indicate your decision by deleting the provisions above and
  31 + * replace them with the notice and other provisions required by
  32 + * the GPL. If you do not delete the provisions above, a recipient
  33 + * may use your version of this file under either the MPL or the
  34 + * GPL.
  35 + */
  36 +
  37 +/*
  38 + * This file is derived directly from Netscape Communications Corporation,
  39 + * and consists of extensive modifications made during the year(s) 1999-2000.
  40 + */
  41 +
  42 +#include <stdlib.h>
  43 +#include <unistd.h>
  44 +#include <fcntl.h>
  45 +#include <string.h>
  46 +#include <time.h>
  47 +#include <errno.h>
  48 +#include "common.h"
  49 +
  50 +/* Global data */
  51 +_st_vp_t _st_this_vp; /* This VP */
  52 +_st_thread_t *_st_this_thread; /* Current thread */
  53 +int _st_active_count = 0; /* Active thread count */
  54 +
  55 +time_t _st_curr_time = 0; /* Current time as returned by time(2) */
  56 +st_utime_t _st_last_tset; /* Last time it was fetched */
  57 +
  58 +int st_poll(struct pollfd *pds, int npds, st_utime_t timeout)
  59 +{
  60 + struct pollfd *pd;
  61 + struct pollfd *epd = pds + npds;
  62 + _st_pollq_t pq;
  63 + _st_thread_t *me = _ST_CURRENT_THREAD();
  64 + int n;
  65 +
  66 + if (me->flags & _ST_FL_INTERRUPT) {
  67 + me->flags &= ~_ST_FL_INTERRUPT;
  68 + errno = EINTR;
  69 + return -1;
  70 + }
  71 +
  72 + if ((*_st_eventsys->pollset_add)(pds, npds) < 0) {
  73 + return -1;
  74 + }
  75 +
  76 + pq.pds = pds;
  77 + pq.npds = npds;
  78 + pq.thread = me;
  79 + pq.on_ioq = 1;
  80 + _ST_ADD_IOQ(pq);
  81 + if (timeout != ST_UTIME_NO_TIMEOUT) {
  82 + _ST_ADD_SLEEPQ(me, timeout);
  83 + }
  84 + me->state = _ST_ST_IO_WAIT;
  85 +
  86 + _ST_SWITCH_CONTEXT(me);
  87 +
  88 + n = 0;
  89 + if (pq.on_ioq) {
  90 + /* If we timed out, the pollq might still be on the ioq. Remove it */
  91 + _ST_DEL_IOQ(pq);
  92 + (*_st_eventsys->pollset_del)(pds, npds);
  93 + } else {
  94 + /* Count the number of ready descriptors */
  95 + for (pd = pds; pd < epd; pd++) {
  96 + if (pd->revents) {
  97 + n++;
  98 + }
  99 + }
  100 + }
  101 +
  102 + if (me->flags & _ST_FL_INTERRUPT) {
  103 + me->flags &= ~_ST_FL_INTERRUPT;
  104 + errno = EINTR;
  105 + return -1;
  106 + }
  107 +
  108 + return n;
  109 +}
  110 +
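A minimal usage sketch for st_poll() as defined above. wait_readable(), fd and usec are hypothetical names, the descriptor is assumed to be one the configured event system can poll, and <poll.h> plus the library's public header are assumed to be included; the timeout is an st_utime_t value (microseconds, judging by the clock code later in this file).

/* Sketch (not part of the diff): wait up to `usec` microseconds for `fd`
 * to become readable.  Returns 1 if readable, 0 on timeout, -1 on error
 * (errno == EINTR if this thread was hit by st_thread_interrupt()). */
static int wait_readable(int fd, st_utime_t usec)
{
    struct pollfd pd;
    int n;

    pd.fd = fd;
    pd.events = POLLIN;
    pd.revents = 0;

    /* st_poll() returns -1 on error, 0 if the timeout expired, or the
     * number of descriptors with nonzero revents. */
    n = st_poll(&pd, 1, usec);
    if (n < 0) {
        return -1;
    }
    return (n > 0 && (pd.revents & POLLIN)) ? 1 : 0;
}
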
  111 +void _st_vp_schedule(void)
  112 +{
  113 + _st_thread_t *trd;
  114 +
  115 + if (_ST_RUNQ.next != &_ST_RUNQ) {
  116 + /* Pull thread off of the run queue */
  117 + trd = _ST_THREAD_PTR(_ST_RUNQ.next);
  118 + _ST_DEL_RUNQ(trd);
  119 + } else {
  120 + /* If there are no threads to run, switch to the idle thread */
  121 + trd = _st_this_vp.idle_thread;
  122 + }
  123 + ST_ASSERT(trd->state == _ST_ST_RUNNABLE);
  124 +
  125 + /* Resume the thread */
  126 + trd->state = _ST_ST_RUNNING;
  127 + _ST_RESTORE_CONTEXT(trd);
  128 +}
  129 +
  130 +/*
  131 + * Initialize this Virtual Processor
  132 + */
  133 +int st_init(void)
  134 +{
  135 + _st_thread_t *trd;
  136 +
  137 + if (_st_active_count) {
  138 + /* Already initialized */
  139 + return 0;
  140 + }
  141 +
  142 + /* We can ignore return value here */
  143 + st_set_eventsys(ST_EVENTSYS_DEFAULT);
  144 +
  145 + if (_st_io_init() < 0) {
  146 + return -1;
  147 + }
  148 +
  149 + memset(&_st_this_vp, 0, sizeof(_st_vp_t));
  150 +
  151 + ST_INIT_CLIST(&_ST_RUNQ);
  152 + ST_INIT_CLIST(&_ST_IOQ);
  153 + ST_INIT_CLIST(&_ST_ZOMBIEQ);
  154 +#ifdef DEBUG
  155 + ST_INIT_CLIST(&_ST_THREADQ);
  156 +#endif
  157 +
  158 + if ((*_st_eventsys->init)() < 0) {
  159 + return -1;
  160 + }
  161 +
  162 + _st_this_vp.pagesize = getpagesize();
  163 + _st_this_vp.last_clock = st_utime();
  164 +
  165 + /*
  166 + * Create idle thread
  167 + */
  168 + _st_this_vp.idle_thread = st_thread_create(_st_idle_thread_start, NULL, 0, 0);
  169 + if (!_st_this_vp.idle_thread) {
  170 + return -1;
  171 + }
  172 + _st_this_vp.idle_thread->flags = _ST_FL_IDLE_THREAD;
  173 + _st_active_count--;
  174 + _ST_DEL_RUNQ(_st_this_vp.idle_thread);
  175 +
  176 + /*
  177 + * Initialize primordial thread
  178 + */
  179 + trd = (_st_thread_t *) calloc(1, sizeof(_st_thread_t) +
  180 + (ST_KEYS_MAX * sizeof(void *)));
  181 + if (!trd) {
  182 + return -1;
  183 + }
  184 + trd->private_data = (void **) (trd + 1);
  185 + trd->state = _ST_ST_RUNNING;
  186 + trd->flags = _ST_FL_PRIMORDIAL;
  187 + _ST_SET_CURRENT_THREAD(trd);
  188 + _st_active_count++;
  189 +#ifdef DEBUG
  190 + _ST_ADD_THREADQ(trd);
  191 +#endif
  192 +
  193 + return 0;
  194 +}
  195 +
  196 +#ifdef ST_SWITCH_CB
  197 +st_switch_cb_t st_set_switch_in_cb(st_switch_cb_t cb)
  198 +{
  199 + st_switch_cb_t ocb = _st_this_vp.switch_in_cb;
  200 + _st_this_vp.switch_in_cb = cb;
  201 + return ocb;
  202 +}
  203 +
  204 +st_switch_cb_t st_set_switch_out_cb(st_switch_cb_t cb)
  205 +{
  206 + st_switch_cb_t ocb = _st_this_vp.switch_out_cb;
  207 + _st_this_vp.switch_out_cb = cb;
  208 + return ocb;
  209 +}
  210 +#endif
  211 +
  212 +/*
  213 + * Start function for the idle thread
  214 + */
  215 +/* ARGSUSED */
  216 +void *_st_idle_thread_start(void *arg)
  217 +{
  218 + _st_thread_t *me = _ST_CURRENT_THREAD();
  219 +
  220 + while (_st_active_count > 0) {
  221 + /* Idle vp till I/O is ready or the smallest timeout expired */
  222 + _ST_VP_IDLE();
  223 +
  224 + /* Check sleep queue for expired threads */
  225 + _st_vp_check_clock();
  226 +
  227 + me->state = _ST_ST_RUNNABLE;
  228 + _ST_SWITCH_CONTEXT(me);
  229 + }
  230 +
  231 + /* No more threads */
  232 + exit(0);
  233 +
  234 + /* NOTREACHED */
  235 + return NULL;
  236 +}
  237 +
  238 +void st_thread_exit(void *retval)
  239 +{
  240 + _st_thread_t *trd = _ST_CURRENT_THREAD();
  241 +
  242 + trd->retval = retval;
  243 + _st_thread_cleanup(trd);
  244 + _st_active_count--;
  245 + if (trd->term) {
  246 + /* Put thread on the zombie queue */
  247 + trd->state = _ST_ST_ZOMBIE;
  248 + _ST_ADD_ZOMBIEQ(trd);
  249 +
  250 + /* Notify on our termination condition variable */
  251 + st_cond_signal(trd->term);
  252 +
  253 + /* Switch context and come back later */
  254 + _ST_SWITCH_CONTEXT(trd);
  255 +
  256 + /* Continue the cleanup */
  257 + st_cond_destroy(trd->term);
  258 + trd->term = NULL;
  259 + }
  260 +
  261 +#ifdef DEBUG
  262 + _ST_DEL_THREADQ(trd);
  263 +#endif
  264 +
  265 + if (!(trd->flags & _ST_FL_PRIMORDIAL)) {
  266 + _st_stack_free(trd->stack);
  267 + }
  268 +
  269 + /* Find another thread to run */
  270 + _ST_SWITCH_CONTEXT(trd);
  271 + /* Not going to land here */
  272 +}
  273 +
  274 +int st_thread_join(_st_thread_t *trd, void **retvalp)
  275 +{
  276 + _st_cond_t *term = trd->term;
  277 +
  278 + /* Can't join a non-joinable thread */
  279 + if (term == NULL) {
  280 + errno = EINVAL;
  281 + return -1;
  282 + }
  283 + if (_ST_CURRENT_THREAD() == trd) {
  284 + errno = EDEADLK;
  285 + return -1;
  286 + }
  287 +
  288 + /* Multiple threads can't wait on the same joinable thread */
  289 + if (term->wait_q.next != &term->wait_q) {
  290 + errno = EINVAL;
  291 + return -1;
  292 + }
  293 +
  294 + while (trd->state != _ST_ST_ZOMBIE) {
  295 + if (st_cond_timedwait(term, ST_UTIME_NO_TIMEOUT) != 0) {
  296 + return -1;
  297 + }
  298 + }
  299 +
  300 + if (retvalp) {
  301 + *retvalp = trd->retval;
  302 + }
  303 +
  304 + /*
  305 + * Remove target thread from the zombie queue and make it runnable.
  306 + * When it gets scheduled later, it will do the clean up.
  307 + */
  308 + trd->state = _ST_ST_RUNNABLE;
  309 + _ST_DEL_ZOMBIEQ(trd);
  310 + _ST_ADD_RUNQ(trd);
  311 +
  312 + return 0;
  313 +}
  314 +
  315 +void _st_thread_main(void)
  316 +{
  317 + _st_thread_t *trd = _ST_CURRENT_THREAD();
  318 +
  319 + /*
  320 + * Cap the stack by zeroing out the saved return address register
  321 + * value. This allows some debugging/profiling tools to know when
  322 + * to stop unwinding the stack. It's a no-op on most platforms.
  323 + */
  324 + MD_CAP_STACK(&trd);
  325 +
  326 + /* Run thread main */
  327 + trd->retval = (*trd->start)(trd->arg);
  328 +
  329 + /* All done, time to go away */
  330 + st_thread_exit(trd->retval);
  331 +}
  332 +
  333 +/*
  334 + * Insert "thread" into the timeout heap, in the position
  335 + * specified by thread->heap_index. See docs/timeout_heap.txt
  336 + * for details about the timeout heap.
  337 + */
  338 +static _st_thread_t **heap_insert(_st_thread_t *trd)
  339 +{
  340 + int target = trd->heap_index;
  341 + int s = target;
  342 + _st_thread_t **p = &_ST_SLEEPQ;
  343 + int bits = 0;
  344 + int bit;
  345 + int index = 1;
  346 +
  347 + while (s) {
  348 + s >>= 1;
  349 + bits++;
  350 + }
  351 +
  352 + for (bit = bits - 2; bit >= 0; bit--) {
  353 + if (trd->due < (*p)->due) {
  354 + _st_thread_t *t = *p;
  355 + trd->left = t->left;
  356 + trd->right = t->right;
  357 + *p = trd;
  358 + trd->heap_index = index;
  359 + trd = t;
  360 + }
  361 + index <<= 1;
  362 + if (target & (1 << bit)) {
  363 + p = &((*p)->right);
  364 + index |= 1;
  365 + } else {
  366 + p = &((*p)->left);
  367 + }
  368 + }
  369 +
  370 + trd->heap_index = index;
  371 + *p = trd;
  372 + trd->left = trd->right = NULL;
  373 +
  374 + return p;
  375 +}
  376 +
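Since docs/timeout_heap.txt is referenced above but is not part of this diff, a short annotation of how heap_insert() encodes the path from the root (my reading of the loop above, not taken from that document):

/*
 * heap_index is the 1-based position of a node in a complete binary tree
 * rooted at _ST_SLEEPQ.  Dropping its most significant bit leaves the path
 * from the root, read from the next-highest bit down: 0 = left, 1 = right.
 * For example, target = 6 is binary 110; after the leading 1, the bits
 * 1,0 mean "go right, then left", which is exactly what the
 * `target & (1 << bit)` test walks as bit runs from bits-2 down to 0.
 */
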
  377 +/*
  378 + * Delete "thread" from the timeout heap.
  379 + */
  380 +static void heap_delete(_st_thread_t *trd)
  381 +{
  382 + _st_thread_t *t, **p;
  383 + int bits = 0;
  384 + int s, bit;
  385 +
  386 + /* First find and unlink the last heap element */
  387 + p = &_ST_SLEEPQ;
  388 + s = _ST_SLEEPQ_SIZE;
  389 + while (s) {
  390 + s >>= 1;
  391 + bits++;
  392 + }
  393 +
  394 + for (bit = bits - 2; bit >= 0; bit--) {
  395 + if (_ST_SLEEPQ_SIZE & (1 << bit)) {
  396 + p = &((*p)->right);
  397 + } else {
  398 + p = &((*p)->left);
  399 + }
  400 + }
  401 +
  402 + t = *p;
  403 + *p = NULL;
  404 + --_ST_SLEEPQ_SIZE;
  405 + if (t != trd) {
  406 + /*
  407 + * Insert the unlinked last element in place of the element we are deleting
  408 + */
  409 + t->heap_index = trd->heap_index;
  410 + p = heap_insert(t);
  411 + t = *p;
  412 + t->left = trd->left;
  413 + t->right = trd->right;
  414 +
  415 + /*
  416 + * Reestablish the heap invariant.
  417 + */
  418 + for (;;) {
  419 + _st_thread_t *y; /* The younger child */
  420 + int index_tmp;
  421 +
  422 + if (t->left == NULL) {
  423 + break;
  424 + } else if (t->right == NULL) {
  425 + y = t->left;
  426 + } else if (t->left->due < t->right->due) {
  427 + y = t->left;
  428 + } else {
  429 + y = t->right;
  430 + }
  431 +
  432 + if (t->due > y->due) {
  433 + _st_thread_t *tl = y->left;
  434 + _st_thread_t *tr = y->right;
  435 + *p = y;
  436 + if (y == t->left) {
  437 + y->left = t;
  438 + y->right = t->right;
  439 + p = &y->left;
  440 + } else {
  441 + y->left = t->left;
  442 + y->right = t;
  443 + p = &y->right;
  444 + }
  445 + t->left = tl;
  446 + t->right = tr;
  447 + index_tmp = t->heap_index;
  448 + t->heap_index = y->heap_index;
  449 + y->heap_index = index_tmp;
  450 + } else {
  451 + break;
  452 + }
  453 + }
  454 + }
  455 +
  456 + trd->left = trd->right = NULL;
  457 +}
  458 +
  459 +void _st_add_sleep_q(_st_thread_t *trd, st_utime_t timeout)
  460 +{
  461 + trd->due = _ST_LAST_CLOCK + timeout;
  462 + trd->flags |= _ST_FL_ON_SLEEPQ;
  463 + trd->heap_index = ++_ST_SLEEPQ_SIZE;
  464 + heap_insert(trd);
  465 +}
  466 +
  467 +void _st_del_sleep_q(_st_thread_t *trd)
  468 +{
  469 + heap_delete(trd);
  470 + trd->flags &= ~_ST_FL_ON_SLEEPQ;
  471 +}
  472 +
  473 +void _st_vp_check_clock(void)
  474 +{
  475 + _st_thread_t *trd;
  476 + st_utime_t elapsed, now;
  477 +
  478 + now = st_utime();
  479 + elapsed = now - _ST_LAST_CLOCK;
  480 + _ST_LAST_CLOCK = now;
  481 +
  482 + if (_st_curr_time && now - _st_last_tset > 999000) {
  483 + _st_curr_time = time(NULL);
  484 + _st_last_tset = now;
  485 + }
  486 +
  487 + while (_ST_SLEEPQ != NULL) {
  488 + trd = _ST_SLEEPQ;
  489 + ST_ASSERT(trd->flags & _ST_FL_ON_SLEEPQ);
  490 + if (trd->due > now) {
  491 + break;
  492 + }
  493 + _ST_DEL_SLEEPQ(trd);
  494 +
495 + /* If thread is waiting on a condition variable, set the timeout flag */

  496 + if (trd->state == _ST_ST_COND_WAIT) {
  497 + trd->flags |= _ST_FL_TIMEDOUT;
  498 + }
  499 +
  500 + /* Make thread runnable */
  501 + ST_ASSERT(!(trd->flags & _ST_FL_IDLE_THREAD));
  502 + trd->state = _ST_ST_RUNNABLE;
  503 + _ST_ADD_RUNQ(trd);
  504 + }
  505 +}
  506 +
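A note on the 999000 constant above, inferred from the code rather than stated anywhere in this diff:

/*
 * st_utime() values are compared against 999000 before re-reading time(2),
 * which only makes sense if st_utime_t is in microseconds here: 999000 us
 * is just under one second, so the cached wall-clock value _st_curr_time
 * (when enabled) is refreshed at most roughly once per second.
 */
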
  507 +void st_thread_interrupt(_st_thread_t* trd)
  508 +{
  509 + /* If thread is already dead */
  510 + if (trd->state == _ST_ST_ZOMBIE) {
  511 + return;
  512 + }
  513 +
  514 + trd->flags |= _ST_FL_INTERRUPT;
  515 +
  516 + if (trd->state == _ST_ST_RUNNING || trd->state == _ST_ST_RUNNABLE) {
  517 + return;
  518 + }
  519 +
  520 + if (trd->flags & _ST_FL_ON_SLEEPQ) {
  521 + _ST_DEL_SLEEPQ(trd);
  522 + }
  523 +
  524 + /* Make thread runnable */
  525 + trd->state = _ST_ST_RUNNABLE;
  526 + _ST_ADD_RUNQ(trd);
  527 +}
  528 +
  529 +_st_thread_t *st_thread_create(void *(*start)(void *arg), void *arg, int joinable, int stk_size)
  530 +{
  531 + _st_thread_t *trd;
  532 + _st_stack_t *stack;
  533 + void **ptds;
  534 + char *sp;
  535 +
  536 + /* Adjust stack size */
  537 + if (stk_size == 0) {
  538 + stk_size = ST_DEFAULT_STACK_SIZE;
  539 + }
  540 + stk_size = ((stk_size + _ST_PAGE_SIZE - 1) / _ST_PAGE_SIZE) * _ST_PAGE_SIZE;
  541 + stack = _st_stack_new(stk_size);
  542 + if (!stack) {
  543 + return NULL;
  544 + }
  545 +
  546 + /* Allocate thread object and per-thread data off the stack */
  547 +#if defined (MD_STACK_GROWS_DOWN)
  548 + sp = stack->stk_top;
  549 + /*
  550 + * The stack segment is split in the middle. The upper half is used
  551 + * as backing store for the register stack which grows upward.
  552 + * The lower half is used for the traditional memory stack which
  553 + * grows downward. Both stacks start in the middle and grow outward
  554 + * from each other.
  555 + */
  556 + /**
  557 + The comment below is by winlin:
  558 + The stack region as allocated:
  559 + +--------------------------------------------------------------+
  560 + | stack |
  561 + +--------------------------------------------------------------+
  562 + bottom top
  563 + The code below uses the stack as:
  564 + +-----------------+-----------------+-------------+------------+
  565 + | stack of thread |pad+align(128B+) |thread(336B) | keys(128B) |
  566 + +-----------------+-----------------+-------------+------------+
  567 + bottom sp trd ptds top
  568 + (context[0].__jmpbuf.sp) (private_data)
  569 + */
  570 + sp = sp - (ST_KEYS_MAX * sizeof(void *));
  571 + ptds = (void **) sp;
  572 + sp = sp - sizeof(_st_thread_t);
  573 + trd = (_st_thread_t *) sp;
  574 +
  575 + /* Make stack 64-byte aligned */
  576 + if ((unsigned long)sp & 0x3f) {
  577 + sp = sp - ((unsigned long)sp & 0x3f);
  578 + }
  579 + stack->sp = sp - _ST_STACK_PAD_SIZE;
  580 +#else
  581 + #error "Only supports stacks that grow down"
  582 +#endif
  583 +
  584 + memset(trd, 0, sizeof(_st_thread_t));
  585 + memset(ptds, 0, ST_KEYS_MAX * sizeof(void *));
  586 +
  587 + /* Initialize thread */
  588 + trd->private_data = ptds;
  589 + trd->stack = stack;
  590 + trd->start = start;
  591 + trd->arg = arg;
  592 +
  593 +// by winlin: the MD_INIT_CONTEXT macro expanded inline
  594 +#if defined(__mips__)
  595 + MD_SETJMP((trd)->context);
  596 + trd->context[0].__jmpbuf[0].__pc = (__ptr_t) _st_thread_main;
  597 + trd->context[0].__jmpbuf[0].__sp = stack->sp;
  598 +#else
  599 + if (MD_SETJMP((trd)->context)) {
  600 + _st_thread_main();
  601 + }
  602 + MD_GET_SP(trd) = (long) (stack->sp);
  603 +#endif
  604 +
  605 + /* If thread is joinable, allocate a termination condition variable */
  606 + if (joinable) {
  607 + trd->term = st_cond_new();
  608 + if (trd->term == NULL) {
  609 + _st_stack_free(trd->stack);
  610 + return NULL;
  611 + }
  612 + }
  613 +
  614 + /* Make thread runnable */
  615 + trd->state = _ST_ST_RUNNABLE;
  616 + _st_active_count++;
  617 + _ST_ADD_RUNQ(trd);
  618 +#ifdef DEBUG
  619 + _ST_ADD_THREADQ(trd);
  620 +#endif
  621 +
  622 + return trd;
  623 +}
  624 +
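To make the layout diagram above concrete, here is the same carving written out as the pointer arithmetic st_thread_create() performs (the byte sizes are the ones quoted in winlin's comment; "128B" corresponds to ST_KEYS_MAX pointers on a 64-bit build with 16 keys):

/*
 * Starting from stack->stk_top at the high end of the allocation:
 *
 *   ptds      = stk_top - ST_KEYS_MAX * sizeof(void *)   -- per-thread keys
 *   trd       = ptds - sizeof(_st_thread_t)              -- the thread object
 *   sp        = trd rounded down to a 64-byte boundary
 *   stack->sp = sp - _ST_STACK_PAD_SIZE                  -- pad below the thread object
 *
 * Everything from stack->sp down to the bottom of the allocation is the
 * thread's C stack; the saved jmp_buf stack pointer (MD_GET_SP(trd)) is
 * pointed at stack->sp.
 */
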
  625 +_st_thread_t *st_thread_self(void)
  626 +{
  627 + return _ST_CURRENT_THREAD();
  628 +}
  629 +
  630 +#ifdef DEBUG
  631 +/* ARGSUSED */
  632 +void _st_show_thread_stack(_st_thread_t *trd, const char *messg)
  633 +{
  634 +}
  635 +
  636 +/* To be set from debugger */
  637 +int _st_iterate_threads_flag = 0;
  638 +
  639 +void _st_iterate_threads(void)
  640 +{
  641 + static _st_thread_t *trd = NULL;
  642 + static jmp_buf orig_jb, save_jb;
  643 + _st_clist_t *q;
  644 +
  645 + if (!_st_iterate_threads_flag) {
  646 + if (trd) {
  647 + memcpy(trd->context, save_jb, sizeof(jmp_buf));
  648 + MD_LONGJMP(orig_jb, 1);
  649 + }
  650 + return;
  651 + }
  652 +
  653 + if (trd) {
  654 + memcpy(trd->context, save_jb, sizeof(jmp_buf));
  655 + _st_show_thread_stack(trd, NULL);
  656 + } else {
  657 + if (MD_SETJMP(orig_jb)) {
  658 + _st_iterate_threads_flag = 0;
  659 + trd = NULL;
  660 + _st_show_thread_stack(trd, "Iteration completed");
  661 + return;
  662 + }
  663 + trd = _ST_CURRENT_THREAD();
  664 + _st_show_thread_stack(trd, "Iteration started");
  665 + }
  666 +
  667 + q = trd->tlink.next;
  668 + if (q == &_ST_THREADQ) {
  669 + q = q->next;
  670 + }
  671 + ST_ASSERT(q != &_ST_THREADQ);
  672 + trd = _ST_THREAD_THREADQ_PTR(q);
  673 + if (trd == _ST_CURRENT_THREAD()) {
  674 + MD_LONGJMP(orig_jb, 1);
  675 + }
  676 + memcpy(save_jb, trd->context, sizeof(jmp_buf));
  677 + MD_LONGJMP(trd->context, 1);
  678 +}
  679 +#endif /* DEBUG */
  680 +
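
For reference, a minimal end-to-end sketch of the API this file backs: st_init(), st_thread_create() and st_thread_join(). The header name st.h and the st_thread_t handle typedef are assumptions about the public interface (only the internal _st_thread_t type appears in this diff); worker and main are hypothetical.

#include <stdio.h>
#include <st.h>    /* assumed public header name; SRS vendors its own copy of state-threads */

/* Hypothetical worker: just hand the argument back as the thread's return value. */
static void *worker(void *arg)
{
    return arg;
}

int main(void)
{
    st_thread_t trd;          /* public handle typedef from st.h (assumption) */
    void *ret = NULL;

    if (st_init() < 0) {      /* sets up the VP, event system and idle thread (see st_init above) */
        return 1;
    }

    /* joinable = 1; stk_size = 0 means ST_DEFAULT_STACK_SIZE, rounded up to whole pages */
    trd = st_thread_create(worker, (void *)"done", 1, 0);
    if (trd == NULL) {
        return 1;
    }

    /* Blocks on the thread's termination condition variable; fails with
     * EINVAL for non-joinable threads and EDEADLK for self-join
     * (see st_thread_join above). */
    if (st_thread_join(trd, &ret) < 0) {
        return 1;
    }
    printf("worker returned: %s\n", (const char *)ret);
    return 0;
}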