
Contents of /contrib/ports/unix/sys_arch.c



Revision 1.25
Wed Nov 19 13:38:10 2008 UTC by kieranm
Branch: MAIN
Changes since 1.24: +1 -1 lines
File MIME type: text/plain
Fix name of sys_now() function (was called sys_unix_now()) in unix port

/*
 * Copyright (c) 2001-2003 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */

/*
 * Wed Apr 17 16:05:29 EDT 2002 (James Roth)
 *
 * - Fixed an unlikely sys_thread_new() race condition.
 *
 * - Made current_thread() work with threads which were
 *   not created with sys_thread_new(). This includes
 *   the main thread and threads made with pthread_create().
 *
 * - Catch overflows where more than SYS_MBOX_SIZE messages
 *   are waiting to be read. The sys_mbox_post() routine
 *   will block until there is more room instead of just
 *   leaking messages.
 */
#include "lwip/debug.h"

#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>      /* printf() in current_thread() */
#include <errno.h>      /* ETIMEDOUT in cond_wait() */
#include <unistd.h>
#include <pthread.h>

#include "lwip/sys.h"
#include "lwip/opt.h"
#include "lwip/stats.h"

#define UMAX(a, b) ((a) > (b) ? (a) : (b))

static struct sys_thread *threads = NULL;
static pthread_mutex_t threads_mutex = PTHREAD_MUTEX_INITIALIZER;

struct sys_mbox_msg {
  struct sys_mbox_msg *next;
  void *msg;
};

#define SYS_MBOX_SIZE 128

struct sys_mbox {
  int first, last;
  void *msgs[SYS_MBOX_SIZE];
  struct sys_sem *not_empty;
  struct sys_sem *not_full;
  struct sys_sem *mutex;
  int wait_send;
};

struct sys_sem {
  unsigned int c;
  pthread_cond_t cond;
  pthread_mutex_t mutex;
};

struct sys_thread {
  struct sys_thread *next;
  struct sys_timeouts timeouts;
  pthread_t pthread;
};


static struct timeval starttime;

static pthread_mutex_t lwprot_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lwprot_thread = (pthread_t) 0xDEAD;
static int lwprot_count = 0;

static struct sys_sem *sys_sem_new_internal(u8_t count);
static void sys_sem_free_internal(struct sys_sem *sem);

static u32_t cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                       u32_t timeout);
/*-----------------------------------------------------------------------------------*/
static struct sys_thread *
introduce_thread(pthread_t id)
{
  struct sys_thread *thread;

  thread = malloc(sizeof(struct sys_thread));

  if (thread != NULL) {
    pthread_mutex_lock(&threads_mutex);
    thread->next = threads;
    thread->timeouts.next = NULL;
    thread->pthread = id;
    threads = thread;
    pthread_mutex_unlock(&threads_mutex);
  }

  return thread;
}
/*-----------------------------------------------------------------------------------*/
static struct sys_thread *
current_thread(void)
{
  struct sys_thread *st;
  pthread_t pt;
  pt = pthread_self();
  pthread_mutex_lock(&threads_mutex);

  for (st = threads; st != NULL; st = st->next) {
    if (pthread_equal(st->pthread, pt)) {
      pthread_mutex_unlock(&threads_mutex);

      return st;
    }
  }

  pthread_mutex_unlock(&threads_mutex);

  st = introduce_thread(pt);

  if (!st) {
    printf("current_thread???\n");
    abort();
  }

  return st;
}
/*-----------------------------------------------------------------------------------*/
sys_thread_t
sys_thread_new(char *name, void (* function)(void *arg), void *arg, int stacksize, int prio)
{
  int code;
  pthread_t tmp;
  struct sys_thread *st = NULL;
  LWIP_UNUSED_ARG(name);
  LWIP_UNUSED_ARG(stacksize);
  LWIP_UNUSED_ARG(prio);

  code = pthread_create(&tmp,
                        NULL,
                        (void *(*)(void *))
                        function,
                        arg);

  if (0 == code) {
    st = introduce_thread(tmp);
  }

  if (NULL == st) {
    LWIP_DEBUGF(SYS_DEBUG, ("sys_thread_new: pthread_create %d, st = 0x%lx",
                            code, (unsigned long)st));
    abort();
  }
  return st;
}
/*-----------------------------------------------------------------------------------*/
struct sys_mbox *
sys_mbox_new(int size)
{
  struct sys_mbox *mbox;
  LWIP_UNUSED_ARG(size);

  mbox = malloc(sizeof(struct sys_mbox));
  if (mbox != NULL) {
    mbox->first = mbox->last = 0;
    mbox->not_empty = sys_sem_new_internal(0);
    mbox->not_full = sys_sem_new_internal(0);
    mbox->mutex = sys_sem_new_internal(1);
    mbox->wait_send = 0;

#if SYS_STATS
    lwip_stats.sys.mbox.used++;
    if (lwip_stats.sys.mbox.used > lwip_stats.sys.mbox.max) {
      lwip_stats.sys.mbox.max = lwip_stats.sys.mbox.used;
    }
#endif /* SYS_STATS */
  }
  return mbox;
}
/*-----------------------------------------------------------------------------------*/
void
sys_mbox_free(struct sys_mbox *mbox)
{
  if (mbox != SYS_MBOX_NULL) {
#if SYS_STATS
    lwip_stats.sys.mbox.used--;
#endif /* SYS_STATS */
    sys_sem_wait(mbox->mutex);

    sys_sem_free_internal(mbox->not_empty);
    sys_sem_free_internal(mbox->not_full);
    sys_sem_free_internal(mbox->mutex);
    mbox->not_empty = mbox->not_full = mbox->mutex = NULL;
    /* LWIP_DEBUGF("sys_mbox_free: mbox 0x%lx\n", mbox); */
    free(mbox);
  }
}
/*-----------------------------------------------------------------------------------*/
err_t
sys_mbox_trypost(struct sys_mbox *mbox, void *msg)
{
  u8_t first;

  sys_sem_wait(mbox->mutex);

  LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_trypost: mbox %p msg %p\n",
                          (void *)mbox, (void *)msg));

  if ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
    /* The ring buffer is full: give up rather than block. */
    sys_sem_signal(mbox->mutex);
    return ERR_MEM;
  }

  mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;

  if (mbox->last == mbox->first) {
    first = 1;
  } else {
    first = 0;
  }

  mbox->last++;

  if (first) {
    sys_sem_signal(mbox->not_empty);
  }

  sys_sem_signal(mbox->mutex);

  return ERR_OK;
}
/*-----------------------------------------------------------------------------------*/
void
sys_mbox_post(struct sys_mbox *mbox, void *msg)
{
  u8_t first;

  sys_sem_wait(mbox->mutex);

  LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_post: mbox %p msg %p\n", (void *)mbox, (void *)msg));

  /* While the ring buffer is full, block until a fetch makes room. */
  while ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
    mbox->wait_send++;
    sys_sem_signal(mbox->mutex);
    sys_arch_sem_wait(mbox->not_full, 0);
    sys_arch_sem_wait(mbox->mutex, 0);
    mbox->wait_send--;
  }

  mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;

  if (mbox->last == mbox->first) {
    first = 1;
  } else {
    first = 0;
  }

  mbox->last++;

  if (first) {
    sys_sem_signal(mbox->not_empty);
  }

  sys_sem_signal(mbox->mutex);
}
/*-----------------------------------------------------------------------------------*/
u32_t
sys_arch_mbox_tryfetch(struct sys_mbox *mbox, void **msg)
{
  sys_arch_sem_wait(mbox->mutex, 0);

  if (mbox->first == mbox->last) {
    sys_sem_signal(mbox->mutex);
    return SYS_MBOX_EMPTY;
  }

  if (msg != NULL) {
    *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p msg %p\n", (void *)mbox, *msg));
  } else {
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p, null msg\n", (void *)mbox));
  }

  mbox->first++;

  if (mbox->wait_send) {
    sys_sem_signal(mbox->not_full);
  }

  sys_sem_signal(mbox->mutex);

  return 0;
}
/*-----------------------------------------------------------------------------------*/
u32_t
sys_arch_mbox_fetch(struct sys_mbox *mbox, void **msg, u32_t timeout)
{
  u32_t time_needed = 0;

  /* The mutex lock is quick so we don't bother with the timeout
     stuff here. */
  sys_arch_sem_wait(mbox->mutex, 0);

  while (mbox->first == mbox->last) {
    sys_sem_signal(mbox->mutex);

    /* We block while waiting for a message to arrive in the mailbox. We
       must be prepared to time out. */
    if (timeout != 0) {
      time_needed = sys_arch_sem_wait(mbox->not_empty, timeout);

      if (time_needed == SYS_ARCH_TIMEOUT) {
        return SYS_ARCH_TIMEOUT;
      }
    } else {
      sys_arch_sem_wait(mbox->not_empty, 0);
    }

    sys_arch_sem_wait(mbox->mutex, 0);
  }

  if (msg != NULL) {
    *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p msg %p\n", (void *)mbox, *msg));
  } else {
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p, null msg\n", (void *)mbox));
  }

  mbox->first++;

  if (mbox->wait_send) {
    sys_sem_signal(mbox->not_full);
  }

  sys_sem_signal(mbox->mutex);

  return time_needed;
}
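/* Illustrative sketch of how the mailbox primitives above fit together,
 * guarded by the hypothetical macro LWIP_UNIX_SYS_ARCH_EXAMPLES (never
 * defined anywhere, so this is not compiled): sys_mbox_post() blocks while
 * the ring buffer is full, and sys_arch_mbox_fetch() blocks -- with an
 * optional millisecond timeout -- while it is empty. */
#ifdef LWIP_UNIX_SYS_ARCH_EXAMPLES
static void
example_mbox_roundtrip(void)
{
  struct sys_mbox *mbox;
  void *fetched = NULL;
  static int payload = 42;

  mbox = sys_mbox_new(SYS_MBOX_SIZE);   /* the size argument is ignored by this port */
  sys_mbox_post(mbox, &payload);        /* would block if the mailbox were full */

  /* Wait at most 100 ms; SYS_ARCH_TIMEOUT is returned if nothing arrives. */
  if (sys_arch_mbox_fetch(mbox, &fetched, 100) != SYS_ARCH_TIMEOUT) {
    LWIP_DEBUGF(SYS_DEBUG, ("example: fetched %d\n", *(int *)fetched));
  }
  sys_mbox_free(mbox);
}
#endif /* LWIP_UNIX_SYS_ARCH_EXAMPLES */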
/*-----------------------------------------------------------------------------------*/
static struct sys_sem *
sys_sem_new_internal(u8_t count)
{
  struct sys_sem *sem;

  sem = malloc(sizeof(struct sys_sem));
  if (sem != NULL) {
    sem->c = count;
    pthread_cond_init(&(sem->cond), NULL);
    pthread_mutex_init(&(sem->mutex), NULL);
  }
  return sem;
}
/*-----------------------------------------------------------------------------------*/
struct sys_sem *
sys_sem_new(u8_t count)
{
#if SYS_STATS
  lwip_stats.sys.sem.used++;
  if (lwip_stats.sys.sem.used > lwip_stats.sys.sem.max) {
    lwip_stats.sys.sem.max = lwip_stats.sys.sem.used;
  }
#endif /* SYS_STATS */
  return sys_sem_new_internal(count);
}
/*-----------------------------------------------------------------------------------*/
static u32_t
cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, u32_t timeout)
{
  int tdiff;
  unsigned long sec, usec;
  struct timeval rtime1, rtime2;
  struct timespec ts;
  struct timezone tz;
  int retval;

  if (timeout > 0) {
    /* Get a timestamp and add the timeout value. */
    gettimeofday(&rtime1, &tz);
    sec = rtime1.tv_sec;
    usec = rtime1.tv_usec;
    usec += timeout % 1000 * 1000;
    sec += (int)(timeout / 1000) + (int)(usec / 1000000);
    usec = usec % 1000000;
    ts.tv_nsec = usec * 1000;
    ts.tv_sec = sec;

    retval = pthread_cond_timedwait(cond, mutex, &ts);

    if (retval == ETIMEDOUT) {
      return SYS_ARCH_TIMEOUT;
    } else {
      /* Calculate for how long we waited for the cond. */
      gettimeofday(&rtime2, &tz);
      tdiff = (rtime2.tv_sec - rtime1.tv_sec) * 1000 +
              (rtime2.tv_usec - rtime1.tv_usec) / 1000;

      if (tdiff <= 0) {
        return 0;
      }

      return tdiff;
    }
  } else {
    pthread_cond_wait(cond, mutex);
    return SYS_ARCH_TIMEOUT;
  }
}
/*-----------------------------------------------------------------------------------*/
u32_t
sys_arch_sem_wait(struct sys_sem *sem, u32_t timeout)
{
  u32_t time_needed = 0;

  pthread_mutex_lock(&(sem->mutex));
  while (sem->c <= 0) {
    if (timeout > 0) {
      time_needed = cond_wait(&(sem->cond), &(sem->mutex), timeout);

      if (time_needed == SYS_ARCH_TIMEOUT) {
        pthread_mutex_unlock(&(sem->mutex));
        return SYS_ARCH_TIMEOUT;
      }
      /* pthread_mutex_unlock(&(sem->mutex));
         return time_needed; */
    } else {
      cond_wait(&(sem->cond), &(sem->mutex), 0);
    }
  }
  sem->c--;
  pthread_mutex_unlock(&(sem->mutex));
  return time_needed;
}
/*-----------------------------------------------------------------------------------*/
void
sys_sem_signal(struct sys_sem *sem)
{
  pthread_mutex_lock(&(sem->mutex));
  sem->c++;

  /* The count never rises above 1: the semaphore behaves as a binary
     semaphore in this port. */
  if (sem->c > 1) {
    sem->c = 1;
  }

  pthread_cond_broadcast(&(sem->cond));
  pthread_mutex_unlock(&(sem->mutex));
}
/*-----------------------------------------------------------------------------------*/
static void
sys_sem_free_internal(struct sys_sem *sem)
{
  pthread_cond_destroy(&(sem->cond));
  pthread_mutex_destroy(&(sem->mutex));
  free(sem);
}
/*-----------------------------------------------------------------------------------*/
void
sys_sem_free(struct sys_sem *sem)
{
  if (sem != SYS_SEM_NULL) {
#if SYS_STATS
    lwip_stats.sys.sem.used--;
#endif /* SYS_STATS */
    sys_sem_free_internal(sem);
  }
}
/*-----------------------------------------------------------------------------------*/
unsigned long
sys_now(void)
{
  struct timeval tv;
  struct timezone tz;
  long sec, usec;
  unsigned long msec;
  gettimeofday(&tv, &tz);

  sec = tv.tv_sec - starttime.tv_sec;
  usec = tv.tv_usec - starttime.tv_usec;
  msec = sec * 1000 + usec / 1000;

  return msec;
}
/*-----------------------------------------------------------------------------------*/
void
sys_init(void)
{
  struct timezone tz;
  gettimeofday(&starttime, &tz);
}
/*-----------------------------------------------------------------------------------*/
struct sys_timeouts *
sys_arch_timeouts(void)
{
  struct sys_thread *thread;

  thread = current_thread();
  return &thread->timeouts;
}
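/* Illustrative sketch, guarded by the same hypothetical
 * LWIP_UNIX_SYS_ARCH_EXAMPLES macro (never defined): because
 * current_thread() registers unknown threads on the fly, a thread started
 * directly with pthread_create() -- or the main thread itself -- still gets
 * a valid per-thread timeout list from sys_arch_timeouts(). */
#ifdef LWIP_UNIX_SYS_ARCH_EXAMPLES
static void *
example_timeouts_entry(void *arg)
{
  struct sys_timeouts *tmo;

  LWIP_UNUSED_ARG(arg);
  /* This thread was not created with sys_thread_new(), yet the call below
     succeeds: current_thread() adds the thread to the list on first use. */
  tmo = sys_arch_timeouts();
  LWIP_DEBUGF(SYS_DEBUG, ("example: timeout list %p\n", (void *)tmo));
  return NULL;
}

static void
example_plain_pthread(void)
{
  pthread_t t;

  if (pthread_create(&t, NULL, example_timeouts_entry, NULL) == 0) {
    pthread_join(t, NULL);
  }
}
#endif /* LWIP_UNIX_SYS_ARCH_EXAMPLES */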
/*-----------------------------------------------------------------------------------*/
/** sys_prot_t sys_arch_protect(void)

This optional function does a "fast" critical region protection and returns
the previous protection level. This function is only called during very short
critical regions. An embedded system which supports ISR-based drivers might
want to implement this function by disabling interrupts. Task-based systems
might want to implement this by using a mutex or disabling tasking. This
function should support recursive calls from the same task or interrupt. In
other words, sys_arch_protect() could be called while already protected. In
that case the return value indicates that it is already protected.

sys_arch_protect() is only required if your port is supporting an operating
system.
*/
sys_prot_t
sys_arch_protect(void)
{
  /* Note that for the UNIX port, we are using a lightweight mutex, and our
   * own counter (which is locked by the mutex). The return code is not actually
   * used. */
  if (lwprot_thread != pthread_self())
  {
    /* We are locking the mutex where it has not been locked before
     * or is being locked by another thread */
    pthread_mutex_lock(&lwprot_mutex);
    lwprot_thread = pthread_self();
    lwprot_count = 1;
  }
  else
    /* It is already locked by THIS thread */
    lwprot_count++;
  return 0;
}
/*-----------------------------------------------------------------------------------*/
/** void sys_arch_unprotect(sys_prot_t pval)

This optional function does a "fast" set of critical region protection to the
value specified by pval. See the documentation for sys_arch_protect() for
more information. This function is only required if your port is supporting
an operating system.
*/
void
sys_arch_unprotect(sys_prot_t pval)
{
  LWIP_UNUSED_ARG(pval);
  if (lwprot_thread == pthread_self())
  {
    if (--lwprot_count == 0)
    {
      lwprot_thread = (pthread_t) 0xDEAD;
      pthread_mutex_unlock(&lwprot_mutex);
    }
  }
}
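/* Illustrative sketch, guarded by the same hypothetical
 * LWIP_UNIX_SYS_ARCH_EXAMPLES macro (never defined): as described above,
 * sys_arch_protect() may be called recursively from one thread, and only
 * the matching outermost sys_arch_unprotect() releases lwprot_mutex. */
#ifdef LWIP_UNIX_SYS_ARCH_EXAMPLES
static void
example_nested_protection(void)
{
  sys_prot_t outer, inner;

  outer = sys_arch_protect();   /* locks lwprot_mutex, lwprot_count = 1 */
  inner = sys_arch_protect();   /* same thread: lwprot_count = 2, no deadlock */
  sys_arch_unprotect(inner);    /* lwprot_count back to 1, mutex still held */
  sys_arch_unprotect(outer);    /* lwprot_count = 0, lwprot_mutex released */
}
#endif /* LWIP_UNIX_SYS_ARCH_EXAMPLES */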

/*-----------------------------------------------------------------------------------*/

#ifndef MAX_JIFFY_OFFSET
#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
#endif

#ifndef HZ
#define HZ 100
#endif

u32_t
sys_jiffies(void)
{
  struct timeval tv;
  unsigned long sec;
  long usec;

  gettimeofday(&tv, NULL);
  sec = tv.tv_sec - starttime.tv_sec;
  usec = tv.tv_usec;

  if (sec >= (MAX_JIFFY_OFFSET / HZ))
    return MAX_JIFFY_OFFSET;
  usec += 1000000L / HZ - 1;
  usec /= 1000000L / HZ;
  return HZ * sec + usec;
}

#if PPP_DEBUG

#include <stdarg.h>

void ppp_trace(int level, const char *format, ...)
{
  va_list args;

  (void)level;
  va_start(args, format);
  vprintf(format, args);
  va_end(args);
}
#endif
