PS2SDK
PS2 Homebrew Libraries
thcommon.c
#include "thcommon.h"
#include "kerr.h"
#include "thevent.h"
#include "timrman.h"
#include "sysmem.h"
#include "heaplib.h"
#include "intrman.h"
#include "loadcore.h"
#include "sysclib.h"
#include "stdio.h"

#include <limits.h>

IRX_ID("Multi_Thread_Manager", 2, 3);
extern struct irx_export_table _exp_thrdman;
extern struct irx_export_table _exp_thbase;
extern struct irx_export_table _exp_thevent;
extern struct irx_export_table _exp_thsemap;
extern struct irx_export_table _exp_thmsgbx;
extern struct irx_export_table _exp_thfpool;
extern struct irx_export_table _exp_thvpool;

struct thread_context thctx;

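/* Allocate an alarm record, reusing one from the free pool when possible.
 * Alarms with id >= 33 are treated as dynamic by alarm_free() and returned
 * to the heap; lower-numbered ones are recycled through thctx.alarm_pool. */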
struct alarm *alarm_alloc()
{
    struct alarm *alarm;
    if (list_empty(&thctx.alarm_pool)) {
        alarm = heap_alloc(0, sizeof(*alarm));
        thctx.alarm_id++;
        alarm->tag.id = thctx.alarm_id;
    } else {
        alarm = list_first_entry(&thctx.alarm_pool, struct alarm, alarm_list);
        list_remove(&alarm->alarm_list);
    }

    return alarm;
}

void alarm_free(struct alarm *alarm)
{
    if (alarm->tag.id >= 33) {
        heap_free(&alarm->tag);
    } else {
        list_insert(&thctx.alarm_pool, &alarm->alarm_list);
    }
}

void alarm_insert(struct list_head *list, struct alarm *alarm)
{
    struct alarm *i;

    list_for_each (i, list, alarm_list) {
        if (alarm->target < i->target) {
            break;
        }
    }

    list_insert(&i->alarm_list, &alarm->alarm_list);
}

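/* Queue a thread on an event object's waiter list, keeping the list sorted
 * by ascending priority value (best priority first). */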
void waitlist_insert(struct thread *thread, struct event *event, s32 priority)
{
    struct thread *weaker;

    list_remove(&thread->queue);

    weaker = list_first_entry(&event->waiters, struct thread, queue);
    list_for_each (weaker, &event->waiters, queue) {
        if (priority < weaker->priority) {
            break;
        }
    }

    list_insert(&weaker->queue, &thread->queue);
}

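/* Reprogram the timer compare register for the pending alarms. Alarms whose
 * targets lie within one minimum interval (thctx.unk4c8) of the previous one
 * are batched behind a single compare match; if the chosen target is already
 * too close to the current time, fall back to "counter + minimum interval". */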
void update_timer_compare(int timid, u64 time, struct list_head *alarm_list)
{
    struct alarm *prev, *i;
    u32 counter, new_compare = 0;

    // what if the list is empty? (luckily it's not, but...)
    prev = list_first_entry(alarm_list, struct alarm, alarm_list);

    if (!list_empty(alarm_list)) {
        list_for_each (i, alarm_list, alarm_list) {
            if (i->target >= prev->target + thctx.unk4c8) {
                break;
            }

            prev = i;
        }
    }

    if (prev->target - time >= thctx.unk4c8) {
        new_compare = prev->target;
    } else {
        counter = GetTimerCounter(timid);
        new_compare = counter + thctx.unk4c8;
    }

    SetTimerCompare(timid, new_compare);
}

unsigned int thread_delay_cb(void *user)
{
    struct thread *thread = user;

    list_remove(&thread->queue);
    thread->status = THS_READY;
    readyq_insert_back(thread);
    thctx.run_next = NULL;

    return 0;
}

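/* Rough stack headroom check for the current thread: compare the address of
 * a local variable against the thread's stack base and bail out through the
 * dispatcher if less than 0xa8 bytes remain. */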
int check_thread_stack()
{
    int stack_remaining;
    stack_remaining = (u32)&stack_remaining - (u32)thctx.current_thread->stack_top;

    if (stack_remaining < 0xa8) {
        Kprintf("CheckThreadStack()\n");
        thread_leave(0, 0, 0, 0);
    }

    return stack_remaining;
}

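/* Thin wrappers around the heaplib allocator used for all kernel objects:
 * heap_alloc() zeroes the block and stamps the object tag, heap_free()
 * clears the tag before releasing the block. */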
void *heap_alloc(u16 tag, u32 bytes)
{
    struct heaptag *ptr = AllocHeapMemory(thctx.heap, bytes);
    if (ptr) {
        memset(ptr, 0, bytes);
        ptr->tag = tag;
    }

    return ptr;
}

int heap_free(struct heaptag *tag)
{
    tag->tag = 0;
    return FreeHeapMemory(thctx.heap, tag);
}

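/* Return to the thread dispatcher via syscall 0x20. ret1 is typically the
 * KE_* status handed back when the thread resumes, intr_state is the
 * interrupt state to restore, and `release` selects which preemption counter
 * gets credited for the switch. */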
int thread_leave(int ret1, int ret2, int intr_state, int release)
{
    register u32 a0 __asm__("a0") = ret1;
    register u32 a1 __asm__("a1") = ret2;
    register u32 a2 __asm__("a2") = intr_state;
    register s32 result __asm__("v0");

    if (!release) {
        thctx.current_thread->reason_counter = &thctx.current_thread->thread_preemption_count;
    } else {
        thctx.current_thread->reason_counter = &thctx.current_thread->release_count;
    }

    asm __volatile__("li $v0, 0x20\n"
                     "syscall\n"
                     : "=r"(result)
                     : "r"(a0), "r"(a1), "r"(a2)
                     : "memory");

    return result;
}

int thread_start(struct thread *thread, int intr_state)
{
    if (thread->priority < thctx.current_thread->priority) {
        thctx.current_thread->status = THS_READY;
        readyq_insert_front(thctx.current_thread);
        thread->status = THS_RUN;
        thctx.run_next = thread;

        return thread_leave(KE_OK, 0, intr_state, 0);
    } else {
        thread->status = THS_READY;
        readyq_insert_back(thread);

        CpuResumeIntr(intr_state);
        return KE_OK;
    }
}

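/* Reset a (re)started thread's register context so it enters at thread->entry
 * with a fresh stack frame and returns into ExitThread, then hand it to
 * thread_start() for scheduling. */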
int thread_init_and_start(struct thread *thread, int intr_state)
{
    thread->wait_type = 0;
    thread->wait_usecs = 0;
    thread->wakeup_count = 0;
    thread->priority = thread->init_priority;
    thread->saved_regs->unk = -2;
    thread->saved_regs->sp = (u32)&thread->saved_regs[1];
    thread->saved_regs->fp = thread->saved_regs->sp;
    thread->saved_regs->ra = (u32)ExitThread;
    thread->saved_regs->gp = thread->gp;
    thread->saved_regs->sr = 0x404;
    thread->saved_regs->sr |= thread->attr & 8;
    thread->saved_regs->pc = (u32)thread->entry;
    thread->saved_regs->I_CTRL = 1;

    list_remove(&thread->queue);

    return thread_start(thread, intr_state);
}

int post_boot_callback_1(iop_init_entry_t *next, int delayed)
{
    printf("\r\nIOP Realtime Kernel Ver. 2.2\r\n Copyright 1999-2002 (C) Sony Computer Entertainment Inc. \r\n");
    return 0;
}

int post_boot_callback_2(iop_init_entry_t *next, int delayed)
{
    ChangeThreadPriority(0, 126);
    if (!next->callback) {
        while (1) {
            DelayThread(1000000);
        }
    }

    return 0;
}

int read_sys_time(iop_sys_clock_t *clock)
{
    u32 hi = thctx.time_hi;
    u32 counter = GetTimerCounter(thctx.timer_id);

    if (counter >= thctx.time_lo) {
        thctx.time_lo = counter;
    } else {
        hi++;
    }

    if (clock) {
        clock->hi = hi;
        clock->lo = counter;
    }

    return 0;
}

// didn't try to figure out the original algo
// just grabbed this one
static u32 ntz(u32 x)
{
    u32 n;

    if (x == 0)
        return (32);
    n = 1;
    if ((x & 0x0000FFFF) == 0) {
        n = n + 16;
        x = x >> 16;
    }
    if ((x & 0x000000FF) == 0) {
        n = n + 8;
        x = x >> 8;
    }
    if ((x & 0x0000000F) == 0) {
        n = n + 4;
        x = x >> 4;
    }
    if ((x & 0x00000003) == 0) {
        n = n + 2;
        x = x >> 2;
    }
    return n - (x & 1);
}

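/* Return the priority of the highest-priority non-empty ready queue by
 * scanning the four 32-bit occupancy bitmaps, or 128 if nothing is ready. */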
u32 readyq_highest()
{
    for (int i = 0; i < 4; i++) {
        if (thctx.queue_map[i]) {
            return ntz(thctx.queue_map[i]) + 32 * i;
        }
    }

    return 128;
}

void report_stack_overflow(struct thread *thread)
{
    ModuleInfo_t *img_info;
    char *name;

    Kprintf("\nThread (thid=%x, #%d) stack overflow\n Stack = %x, Stack size = %x, SP=%x\n",
            MAKE_HANDLE(thread),
            thread->tag.id,
            thread->stack_top,
            thread->stack_size,
            thread->saved_regs);

    img_info = FindImageInfo(thread->entry);
    if (img_info) {
        name = img_info->name;
        if (name) {
            Kprintf(" Module Name = %s\n", name);
        }
    }

    __builtin_trap();
}

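/* Drain the delete queue: free each dead thread's stack (wiping it first if
 * TH_CLEAR_STACK is set), unlink the thread, and release its control block. */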
void do_delete_thread()
{
    struct thread *thread;

    while (!list_empty(&thctx.delete_queue)) {
        thread = list_first_entry(&thctx.delete_queue, struct thread, queue);
        if (thread->attr & TH_CLEAR_STACK) {
            memset(thread->stack_top, 0, thread->stack_size);
        }

        FreeSysMemory(thread->stack_top);
        list_remove(&thread->queue);
        list_remove(&thread->thread_list);
        heap_free(&thread->tag);
    }
}

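/* Pick the thread to run next. If the current thread is still THS_RUN it is
 * only displaced when a ready thread has a better (numerically lower)
 * priority; otherwise the head of the best ready queue always takes over. */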
void schedule_next()
{
    struct thread *cur, *new;
    u32 prio;

    cur = thctx.current_thread;
    thctx.run_next = thctx.current_thread;

    prio = readyq_highest();

    // originally this would fall through and hit the bottom Kprintf,
    // but I don't want the nesting
    if (prio >= 128) {
        Kprintf("Panic: not found ready Thread\n");
        return;
    }

    new = list_first_entry(&thctx.ready_queue[prio], struct thread, queue);

    if (thctx.current_thread->status == THS_RUN) {
        if (thctx.debug_flags & 4) {
            Kprintf(" THS_RUN cp=%d : hp=%d ", cur->priority, prio);
        }

        if (prio < cur->priority) {
            if (thctx.debug_flags & 4) {
                Kprintf(" readyq = %x, newrun = %x:%d, prio = %d",
                        &thctx.ready_queue[prio],
                        new,
                        new->tag.id,
                        prio);
            }

            readyq_remove(new, prio);
            new->status = THS_RUN;
            thctx.run_next = new;
            cur->status = THS_READY;
            readyq_insert_front(cur);
        }
    } else {
        if (thctx.debug_flags & 4) {
            Kprintf(" not THS_RUN ");

            Kprintf(" readyq = %x, newrun = %x:%d, prio = %d",
                    &thctx.ready_queue[prio],
                    new,
                    new->tag.id,
                    prio);
        }

        readyq_remove(new, prio);
        new->status = THS_RUN;
        thctx.run_next = new;
    }

    if ((thctx.debug_flags & 4) != 0)
        Kprintf("\n");
}

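/* Context-switch callback installed via SetNewCtxCb(). Saves the outgoing
 * thread's register context, checks for stack overflow, performs deferred
 * thread deletion and scheduling when no successor has been chosen yet,
 * updates run-time accounting, and returns the context to resume. */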
struct regctx *new_context_cb(struct regctx *ctx)
{
    u64 new_time;
    u32 timer;

    if ((thctx.debug_flags & 3) != 0) {
        if ((thctx.debug_flags & 3) == 1)
            Kprintf("[%3d->", thctx.current_thread->tag.id);
        if ((thctx.debug_flags & 3) == 2)
            Kprintf("switch_context(%x:%x,pc=%x,ei=%x =>%x:%d)\n",
                    ctx,
                    ctx->unk,
                    ctx->pc,
                    ctx->I_CTRL,
                    thctx.current_thread,
                    thctx.current_thread->tag.id);
    }

    thctx.current_thread->saved_regs = ctx;
    if ((u32)ctx < (u32)thctx.current_thread->stack_top) {
        report_stack_overflow(thctx.current_thread);
    }

    if (!thctx.run_next) {
        if (!list_empty(&thctx.delete_queue)) {
            do_delete_thread();
        }

        schedule_next();
    }

    if (thctx.current_thread == thctx.run_next) {
        thctx.thread_resume_count++;
    } else {
        timer = thctx.timer_func();
        new_time = add64(0, timer - thctx.last_timer, thctx.current_thread->run_clocks_hi, thctx.current_thread->run_clocks_lo);

        thctx.current_thread->run_clocks_lo = (u32)new_time;
        thctx.current_thread->run_clocks_hi = (u32)(new_time >> 32);
        thctx.thread_switch_count++;
        (*thctx.current_thread->reason_counter)++;
    }

    thctx.current_thread = thctx.run_next;

    if ((thctx.debug_flags & 3) != 0) {
        if ((thctx.debug_flags & 3) == 1)
            Kprintf("%3d]", thctx.run_next->tag.id);
        if ((thctx.debug_flags & 3) == 2)
            Kprintf(" switch_context --> %x:%x,pc=%x,ei=%x =>%x:%d\n",
                    thctx.run_next->saved_regs,
                    thctx.run_next->saved_regs->unk,
                    thctx.run_next->saved_regs->pc,
                    thctx.run_next->saved_regs->I_CTRL,
                    thctx.run_next,
                    thctx.run_next->tag.id);
    }

    // some sort of debug display?
    if (thctx.debug_flags & 0x20) {
        _sw(~(1 << ((thctx.run_next->tag.id - 1) & 7)), 0xbf802070);
    }

    return thctx.run_next->saved_regs;
}

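/* Callback registered with SetShouldPreemptCb(): reports whether a different
 * thread is slated to run and, if so, credits the upcoming switch to the IRQ
 * preemption counter. */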
int preempt_cb(int unk)
{
    if (thctx.run_next != thctx.current_thread) {
        thctx.current_thread->reason_counter = &thctx.current_thread->irq_preemption_count;
        return 1;
    }

    return 0;
}

void idle_thread()
{
    while (1)
        ;
}

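/* Interrupt handler for the scheduler's hardware timer. Advances the 64-bit
 * system clock on counter overflow and, on compare match, fires every alarm
 * whose target time has passed: the rollover alarm (id 1) is pushed forward
 * one full counter period, others are freed or re-armed based on their
 * callback's return value. Finally the compare register is reprogrammed. */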
int timer_handler(void *user)
{
    struct thread_context *thctx = user;
    struct alarm *alarm;
    u32 status, counter, ret;
    u64 time = 0;

    status = GetTimerStatus(thctx->timer_id);
    counter = GetTimerCounter(thctx->timer_id);

    // overflow
    if (status & 0x1000) {
        thctx->time_hi++;
        thctx->time_lo = counter;
    }

    // compare
    if (status & 0x800) {
        list_for_each_safe (alarm, &thctx->alarm, alarm_list) {
            counter = GetTimerCounter(thctx->timer_id);
            status = GetTimerStatus(thctx->timer_id);
            if (counter < thctx->time_lo && (status & 0x1000)) {
                thctx->time_hi++;
                thctx->time_lo = counter;
            }

            time = as_u64(thctx->time_hi, counter);
            if (time < alarm->target) {
                break;
            }

            // alarm has fired, remove, update, and reschedule
            list_remove(&alarm->alarm_list);

            if (alarm->tag.id == 1) {
                alarm->target += 0x100000000;
            } else {
                ret = alarm->cb(alarm->userptr);
                if (!ret) {
                    alarm_free(alarm);
                    thctx->alarm_count--;
                    continue;
                }

                if (ret < thctx->min_wait) {
                    ret = thctx->min_wait;
                }

                alarm->target += ret;
            }

            alarm_insert(&thctx->alarm, alarm);
        }

        update_timer_compare(thctx->timer_id, time, &thctx->alarm);
    }

    return 1;
}

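/* Set up the scheduler timer: choose clock constants based on the boot mode,
 * allocate a hardware timer and register timer_handler() on its interrupt,
 * create the permanent rollover alarm (id 1) plus a pool of spare alarm
 * records, then program and enable the timer. */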
void init_timer()
{
    iop_sys_clock_t compare;
    s32 timer_id, timer_irq;
    struct alarm *alarm;
    int *bootmode;
    int state;

    thctx.unk_clock_mult = 0x1200;
    thctx.unk_clock_div = 125;

    bootmode = QueryBootMode(7);
    if (bootmode && *bootmode == 200) {
        thctx.unk_clock_mult = 25;
        thctx.unk_clock_div = 1;
    }

    USec2SysClock(100, &compare);

    thctx.min_wait = compare.lo;
    thctx.unk4c8 = 2 * compare.lo;

    timer_id = AllocHardTimer(1, 32, 1);
    thctx.timer_id = timer_id;
    thctx.timer_func = GetTimerReadFunc(timer_id);
    timer_irq = GetHardTimerIntrCode(timer_id);
    RegisterIntrHandler(timer_irq, 1, timer_handler, &thctx);

    list_init(&thctx.alarm);
    list_init(&thctx.alarm_pool);
    thctx.alarm_id = 0;
    CpuSuspendIntr(&state);
    alarm = alarm_alloc();
    list_insert(&thctx.alarm, &alarm->alarm_list);
    USec2SysClock(2000, &compare);
    // hmm
    alarm->target = 0x100000000LL - compare.lo;

    thctx.alarm_count = 1;

    for (int i = 0; i < 32; i++) {
        alarm = heap_alloc(0, sizeof(*alarm));
        thctx.alarm_id++;
        alarm->tag.id = thctx.alarm_id;
        alarm_free(alarm);
    }

    SetTimerMode(timer_id, 0);
    SetTimerCompare(timer_id, compare.lo);
    SetTimerCounter(timer_id, 0);
    SetTimerMode(timer_id, 0x70);
    EnableIntr(GetHardTimerIntrCode(timer_id));
    CpuResumeIntr(state);
}

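/* Module entry point. Exports the thread manager libraries, initializes the
 * global thread context and ready queues, creates the idle thread and a
 * thread record describing the currently running boot context, installs the
 * context-switch callbacks, starts the timer, and registers the post-boot
 * callbacks before going resident. */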
int _start(int argc, char **argv)
{
    struct thread *idle, *current;
    iop_event_t flag;
    int *BootMode;
    int state;
    int i;

    if (RegisterNonAutoLinkEntries(&_exp_thrdman)) {
        return MODULE_NO_RESIDENT_END;
    }

    if (RegisterLibraryEntries(&_exp_thbase)) {
        return MODULE_NO_RESIDENT_END;
    }

    CpuSuspendIntr(&state);
    RegisterLibraryEntries(&_exp_thevent);
    RegisterLibraryEntries(&_exp_thsemap);
    RegisterLibraryEntries(&_exp_thmsgbx);
    RegisterLibraryEntries(&_exp_thfpool);
    RegisterLibraryEntries(&_exp_thvpool);

    memset(&thctx, 0, sizeof(thctx));
    thctx.debug_flags = DEBUG_FLAGS;

    list_init(&thctx.semaphore);
    list_init(&thctx.event_flag);
    list_init(&thctx.mbox);
    list_init(&thctx.vpool);
    list_init(&thctx.fpool);
    list_init(&thctx.sleep_queue);
    list_init(&thctx.delay_queue);
    // list_init(&thctx.unused_list1);
    // list_init(&thctx.unused_list2);
    list_init(&thctx.dormant_queue);
    list_init(&thctx.delete_queue);
    list_init(&thctx.thread_list);

    for (int i = 0; i < 128; i++) {
        list_init(&thctx.ready_queue[i]);
    }

    thctx.heap = CreateHeap(2048, 1);

    // Create the idle thread
    idle = heap_alloc(TAG_THREAD, sizeof(*idle));
    idle->tag.id = ++thctx.thread_id;
    idle->stack_size = 512;
    idle->stack_top = AllocSysMemory(1, 512, 0);
    idle->init_priority = 127;
    idle->priority = 127;
    idle->attr = TH_C;
    idle->status = THS_READY;
    idle->entry = idle_thread;
    idle->saved_regs = idle->stack_top + (((idle->stack_size << 2) >> 2) - RESERVED_REGCTX_SIZE);
    memset(idle->saved_regs, 0, RESERVED_REGCTX_SIZE);

    asm __volatile__("sw $gp, %0\n"
                     : "=m"(idle->gp)::);

    idle->saved_regs->unk = -2;
    idle->saved_regs->sp = (u32)&idle->saved_regs[1];
    idle->saved_regs->gp = idle->gp;
    idle->saved_regs->fp = idle->saved_regs->sp;
    idle->saved_regs->ra = (u32)ExitThread;
    idle->saved_regs->sr = (idle->attr & 0xF0000000) | 0x404;
    idle->saved_regs->sr |= idle->attr & 8;
    idle->saved_regs->pc = (u32)idle->entry;
    idle->saved_regs->I_CTRL = 1;

    list_insert(&thctx.thread_list, &idle->thread_list);
    thctx.idle_thread = idle;
    readyq_insert_back(idle);

    // Create a thread entry for our current state
    current = heap_alloc(TAG_THREAD, sizeof(*current));
    current->tag.id = ++thctx.thread_id;
    // Taking the address of a stack variable to get
    // the allocated stack and size. Cute.
    current->stack_size = QueryBlockSize(&i);
    current->stack_top = QueryBlockTopAddress(&i);
    current->init_priority = 8;
    current->priority = 1;
    current->attr = TH_C;
    current->status = THS_RUN;

    asm __volatile__("sw $gp, %0\n"
                     : "=m"(current->gp)::);

    list_insert(&thctx.thread_list, &current->thread_list);
    thctx.current_thread = current;
    thctx.run_next = current;
    current->queue.next = NULL;
    current->queue.prev = NULL;

    SetNewCtxCb(new_context_cb);
    SetShouldPreemptCb(preempt_cb);
    init_timer();

    flag.attr = EA_MULTI;
    flag.bits = 0;
    flag.option = 0;
    thctx.sytem_status_flag = CreateEventFlag(&flag);

    BootMode = QueryBootMode(4);
    if (BootMode) {
        SetEventFlag(thctx.sytem_status_flag, 1 << (*BootMode & 3));
    }

    RegisterPostBootCallback(post_boot_callback_1, 2, 0);
    RegisterPostBootCallback(post_boot_callback_2, 3, 0);

    // mismatched with suspend?

    return MODULE_RESIDENT_END;
}