11#include "irx_imports.h"
20IRX_ID(
"IOP_SIF_manager", 2, 5);
34 void (*func)(
void *userdata);
57 void (*dma_intr_handler)(
void *userdata);
58 void *dma_intr_handler_userdata;
/* One-shot init guards -- checked before re-initializing the DMA2 channel
   and the SIF core respectively (see the `!sif_dma2_inited` test below).
   NOTE(review): leading digits on lines in this file are fused original
   line numbers from a lossy extraction; preserved byte-for-byte. */
67static u32 sif_dma2_inited = 0;
68static u32 sif_inited = 0;
/* Fragment of the _mfc0 helper macro: reads coprocessor-0 register `reg`
   into `val` via inline asm; mfc0() is the public wrapper.
   NOTE(review): the surrounding _mfc0 definition lines were lost in
   extraction -- this line ends in a continuation backslash. */
75 __asm__ volatile("mfc0 %0, " #reg : "=r"(val)); \
79#define mfc0(reg) _mfc0(reg)
/*
 * IRX module entry point: registers the sifman export library when the
 * SBUS/SIF hardware state checks pass.
 * NOTE(review): block truncated by extraction -- braces, the remaining
 * branch paths and the final return are missing. Text preserved
 * byte-for-byte; comments only.
 */
81int _start(
int ac,
char **av)
84 USE_IOP_MMIO_HWPORT();
85 USE_SIF_MMIO_HWPORT();
/* Bit 3 of SBUS ctrl clear: presumably SIF not yet set up by another
   party -- TODO confirm against full source. */
93 if ( (iop_mmio_hwport->iop_sbus_ctrl[0] & 8) == 0 )
95 if ( sif_mmio_hwport->unk60 == 0x1D000060 )
96 return RegisterLibraryEntries(&_exp_sifman) != 0;
97 if ( (sif_mmio_hwport->unk60 & 0xFFFFF000) == 0 )
98 return RegisterLibraryEntries(&_exp_sifman) != 0;
104static u32 get_msflag()
107 USE_SIF_MMIO_HWPORT();
109 for ( result = sif_mmio_hwport->msflag; result != sif_mmio_hwport->msflag; result = sif_mmio_hwport->msflag )
114static u32 get_smflag()
117 USE_SIF_MMIO_HWPORT();
119 for ( result = sif_mmio_hwport->smflag; result != sif_mmio_hwport->smflag; result = sif_mmio_hwport->smflag )
/*
 * Fragment: one-time init of the legacy DMA channel (dmac1 old channel 2)
 * guarded by sif_dma2_inited -- presumably sceSifDma2Init; the enclosing
 * signature and closing lines were lost in extraction.
 */
126 USE_IOP_MMIO_HWPORT();
128 if ( !sif_dma2_inited )
130 iop_mmio_hwport->dmac1.oldch[2].chcr = 0;
/* Enable the channel in the DMA priority/enable register. */
131 iop_mmio_hwport->dmac1.dpcr1 |= 0x800;
/* Forward declaration for the DMA bookkeeping initializer defined below. */
136static int sif_dma_init(
void);
/*
 * Fragment: SIF core initialization (presumably sceSifInit) -- enables the
 * SIF0/SIF1 channels in DPCR2, clears both channel controls, sets SBUS
 * control bits, then samples the main-side flag.
 * NOTE(review): the enclosing signature, several lines and the function
 * tail were lost in extraction; text preserved byte-for-byte.
 */
142 USE_IOP_MMIO_HWPORT();
143 USE_SIF_MMIO_HWPORT();
147 iop_mmio_hwport->dmac2.dpcr2 |= 0x8800;
148 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
149 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
151 if ( (iop_mmio_hwport->iop_sbus_ctrl[0] & 0x10) != 0 )
153 iop_mmio_hwport->iop_sbus_ctrl[0] |= 0x10;
155 iop_mmio_hwport->iop_sbus_ctrl[0] |= 1;
/* Stable read of the main-side flag (see get_msflag). */
160 msflag = get_msflag();
/*
 * Fragment: resets both SIF DMA channels and pokes the same SBUS bit as
 * the init path above. Enclosing function name/signature lost in
 * extraction -- possibly a re-init or shutdown path; confirm upstream.
 */
173 USE_IOP_MMIO_HWPORT();
177 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
178 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
179 if ( iop_mmio_hwport->iop_sbus_ctrl[0] & 0x10 )
181 iop_mmio_hwport->iop_sbus_ctrl[0] |= 0x10;
191void sceSifSetDChain(
void)
193 USE_IOP_MMIO_HWPORT();
194 USE_SIF_MMIO_HWPORT();
196 if ( (sif_mmio_hwport->controlreg & 0x40) == 0 )
197 sif_mmio_hwport->controlreg = 64;
198 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
199 iop_mmio_hwport->dmac2.newch[3].bcr = (iop_mmio_hwport->dmac2.newch[3].bcr & 0xFFFF0000) | 32;
200 iop_mmio_hwport->dmac2.newch[3].chcr = 0x41000300;
203void sceSifSetDmaIntrHandler(
void (*handler)(
void *userdata),
void *arg)
205 sifman_internals.dma_intr_handler = handler;
206 sifman_internals.dma_intr_handler_userdata = arg;
209void sceSifResetDmaIntrHandler()
211 sifman_internals.dma_intr_handler = NULL;
212 sifman_internals.dma_intr_handler_userdata = NULL;
/*
 * SIF send-channel DMA completion interrupt handler.
 * Runs the user-installed callback (if any), then the per-transfer
 * completion callbacks queued for the buffer that just finished; if more
 * tags were queued while the DMA ran and the channel is now idle, swaps
 * the double buffers and restarts the channel.
 * NOTE(review): truncated by extraction -- local declarations (smi, v4,
 * v5, p_sif_bufcom1, sif_otherbufcom), loop header, braces and the return
 * are missing. Text preserved byte-for-byte; comments only.
 */
215static int sifman_interrupt_handler(
void *userdata)
217 void (*dma_intr_handler)(
void *userdata);
222 USE_IOP_MMIO_HWPORT();
223 USE_SIF_MMIO_HWPORT();
/* User callback installed via sceSifSetDmaIntrHandler, if any. */
226 dma_intr_handler = smi->dma_intr_handler;
227 if ( dma_intr_handler )
228 dma_intr_handler(smi->dma_intr_handler_userdata);
229 sif_otherbufcom = smi->sif_otherbufcom;
/* Run the queued completion callbacks for the finished buffer. */
231 if ( sif_otherbufcom->count > 0 )
239 sif_otherbufcom->info[v5].func(sif_otherbufcom->info[v5].userdata);
240 sif_otherbufcom = smi->sif_otherbufcom;
242 }
while ( v4 < sif_otherbufcom->
count );
244 smi->sif_otherbufcom->count = 0;
/* Channel idle (busy bit 24 clear) and new tags pending: kick next batch. */
245 if ( (iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000) == 0 && smi->dmatag_index > 0 )
247 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
248 iop_mmio_hwport->dmac2.newch[2].tadr = (uiptr)(smi->sif_curbuf);
249 iop_mmio_hwport->dmac2.newch[2].bcr = (iop_mmio_hwport->dmac2.newch[2].bcr & 0xFFFF0000) | 32;
250 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
251 sif_mmio_hwport->controlreg = 32;
253 smi->dmatag_index = 0;
/* Swap the tag/callback double buffers. */
254 if ( smi->sif_curbuf == smi->sif_buf1 )
256 smi->sif_curbuf = smi->sif_buf2;
257 smi->sif_curbufcom = &smi->sif_bufcom2;
258 p_sif_bufcom1 = &smi->sif_bufcom1;
262 smi->sif_curbufcom = &smi->sif_bufcom1;
263 p_sif_bufcom1 = &smi->sif_bufcom2;
264 smi->sif_curbuf = smi->sif_buf1;
266 smi->sif_otherbufcom = p_sif_bufcom1;
267 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000701;
/*
 * One-time DMA bookkeeping initialization: points the staging state at
 * buffer 1, zeroes both completion queues and the tag index, and clears
 * the user interrupt callback.
 * NOTE(review): function tail lost in extraction (presumably interrupt
 * handler registration and the return value); braces missing.
 */
272static int sif_dma_init(
void)
276 sifman_internals.sif_curbuf = sifman_internals.sif_buf1;
277 sifman_internals.sif_curbufcom = &sifman_internals.sif_bufcom1;
278 sifman_internals.dmatag_index = 0;
279 sifman_internals.sif_bufcom1.count = 0;
280 sifman_internals.sif_bufcom2.count = 0;
281 sifman_internals.sif_otherbufcom = &sifman_internals.sif_bufcom2;
282 sifman_internals.dma_intr_handler = 0;
283 sifman_internals.dma_intr_handler_userdata = 0;
/*
 * Fragment: builds one hardware DMA tag from a transfer descriptor a1
 * (presumably sif_dma_setup_tag, called from set_dma_inner). The
 * signature and the v4/v5 word-count computations were lost in
 * extraction.
 */
298 v1 = &sifman_internals.sif_curbuf[sifman_internals.dmatag_index];
/* Low 24 bits of the source address go in the tag's data word. */
299 v2 = (int)a1->src & 0xFFFFFF;
/* attr bit 1 sets tag bit 30 -- semantics unconfirmed from this view. */
303 if ( (a1->attr & 2) != 0 )
304 v1->data = v2 | 0x40000000;
305 v1->words = v4 & 0xFFFFFF;
309 v1->count = v5 | 0x10000000;
310 if ( (a1->attr & 4) != 0 )
311 v1->count |= 0x80000000;
312 v1->addr = (int)a1->dest & 0x1FFFFFFF;
/* Returns the new number of queued tags. */
313 return ++sifman_internals.dmatag_index;
/*
 * Core of sceSifSetDma/sceSifSetDmaIntr: appends `count` DMA tags to the
 * current staging buffer, optionally records a completion callback, and
 * starts the send channel if it is idle. Returns a transfer id packing
 * (dma_count << 16) | (first tag index << 8) | (u8)count.
 * NOTE(review): truncated by extraction -- the signature, local
 * declarations and several branch bodies are missing. Text preserved
 * byte-for-byte; comments only.
 */
323 USE_IOP_MMIO_HWPORT();
324 USE_SIF_MMIO_HWPORT();
/* Reject/handle overflow of the 32-entry tag buffer (branch body lost). */
326 if ( 32 - sifman_internals.dmatag_index <
count )
328 dmatag_index = sifman_internals.dmatag_index;
329 dma_count = sifman_internals.dma_count;
/* Clear bit 31 on the previous last tag before appending more. */
330 if ( sifman_internals.dmatag_index )
331 sifman_internals.sif_curbuf[sifman_internals.dmatag_index - 1].data &= ~0x80000000;
332 for ( i = 0; i <
count; ++dmat )
334 sif_dma_setup_tag(dmat);
/* Re-mark the new last tag with bit 31 (chain terminator -- presumed). */
337 sifman_internals.sif_curbuf[sifman_internals.dmatag_index - 1].data |= 0x80000000;
/* Queue the optional completion callback for this batch. */
340 sifman_internals.sif_curbufcom->info[sifman_internals.sif_curbufcom->count].func = func;
341 sifman_internals.sif_curbufcom->info[sifman_internals.sif_curbufcom->count++].userdata = data;
343 v14 = dma_count << 16;
/* Start only if channel is idle and no transfer/interrupt is pending. */
344 if ( (iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000) == 0 )
346 v14 = dma_count << 16;
347 if ( iop_mmio_hwport->dmac2.new_unusedch.madr == 0 )
349 v14 = dma_count << 16;
350 if ( (iop_mmio_hwport->dmac2.dicr2 & 0x4000000) == 0 )
352 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
353 iop_mmio_hwport->dmac2.newch[2].tadr = (uiptr)(sifman_internals.sif_curbuf);
354 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
355 sif_mmio_hwport->controlreg = 32;
356 iop_mmio_hwport->dmac2.newch[2].bcr = (iop_mmio_hwport->dmac2.newch[2].bcr & 0xFFFF0000) | 32;
357 sifman_internals.dmatag_index = 0;
358 ++sifman_internals.dma_count;
/* Swap the double buffers for subsequent queuing. */
359 if ( sifman_internals.sif_curbuf == sifman_internals.sif_buf1 )
361 sifman_internals.sif_curbuf = sifman_internals.sif_buf2;
362 sifman_internals.sif_curbufcom = &sifman_internals.sif_bufcom2;
363 p_sif_bufcom1 = &sifman_internals.sif_bufcom1;
367 sifman_internals.sif_curbuf = sifman_internals.sif_buf1;
368 sifman_internals.sif_curbufcom = &sifman_internals.sif_bufcom1;
369 p_sif_bufcom1 = &sifman_internals.sif_bufcom2;
371 sifman_internals.sif_otherbufcom = p_sif_bufcom1;
372 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000701;
377 return v14 | (dmatag_index << 8) | (u8)
count;
/* Fragment: tail of the no-callback setter (presumably sceSifSetDma) --
   forwards to set_dma_inner with a NULL completion callback. Signature
   and braces lost in extraction. */
382 return set_dma_inner(dmat,
count, 0, 0);
385unsigned int sceSifSetDmaIntr(
SifDmaTransfer_t *dmat,
int count,
void (*completioncb)(
void *userdata),
void *userdata)
387 return set_dma_inner(dmat,
count, completioncb, userdata);
/*
 * Core of sceSifDmaStat: decodes the transfer id a1 (dma_count in the
 * upper 16 bits) and compares it with hardware channel state to decide
 * the transfer's status.
 * NOTE(review): the return statements and branch bodies were lost in
 * extraction, so the actual status codes cannot be read from this view.
 */
390static int dma_stat_inner(
unsigned int a1)
392 USE_IOP_MMIO_HWPORT();
/* Channel idle and no pending address/interrupt => transfer finished? */
394 if ( (iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000) == 0 && !iop_mmio_hwport->dmac2.new_unusedch.madr )
396 if ( (iop_mmio_hwport->dmac2.dicr2 & 0x4000000) == 0 )
399 if ( sifman_internals.dma_count != ((a1 >> 16) & 0xFFFF) )
401 if ( sifman_internals.dma_count == (u16)(((a1 >> 16) & 0xFFFF) + 1) )
/*
 * Query the status of a transfer id returned by sceSifSetDma/SetDmaIntr.
 * NOTE(review): truncated -- the two visible paths suggest a direct call
 * (likely when already in interrupt context) and a guarded call whose
 * surrounding lines (presumably CpuSuspendIntr/CpuResumeIntr) were lost.
 */
408int sceSifDmaStat(
int trid)
414 return dma_stat_inner(trid);
416 v2 = dma_stat_inner(trid);
/*
 * Fragment: builds a single DMA tag in sifman_internals.one from a
 * by-value transfer descriptor `dmat` and immediately starts the send
 * channel (presumably sceSifSetOneDma). Signature and the v3 computation
 * were lost in extraction.
 * NOTE(review): the dest mask here is 0xFFFFFFF, while the multi-tag path
 * uses 0x1FFFFFFF -- possible dropped digit in one of them; compare with
 * the full source before relying on either.
 */
426 USE_IOP_MMIO_HWPORT();
427 USE_SIF_MMIO_HWPORT();
429 v1 = ((int)dmat.src & 0xFFFFFF) | 0x80000000;
/* Size in words, rounded up. */
430 v2 = ((
unsigned int)dmat.size >> 2) + ((dmat.size & 3) != 0);
431 sifman_internals.one.data = v1;
432 sifman_internals.one.words = v2 & 0xFFFFFF;
433 if ( (dmat.attr & 2) != 0 )
434 sifman_internals.one.data = v1 | 0x40000000;
438 sifman_internals.one.count = v3 | 0x10000000;
439 if ( (dmat.attr & 4) != 0 )
440 sifman_internals.one.count |= 0x80000000;
441 sifman_internals.one.addr = (int)dmat.dest & 0xFFFFFFF;
442 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
443 sif_mmio_hwport->controlreg = 32;
444 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
445 iop_mmio_hwport->dmac2.newch[2].tadr = (uiptr)&sifman_internals.one;
446 iop_mmio_hwport->dmac2.newch[2].bcr = (iop_mmio_hwport->dmac2.newch[2].bcr & 0xFFFF0000) | 32;
447 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000701;
/* Two fragments whose signatures were lost in extraction: first a
   busy-wait until the send channel's busy bit (chcr bit 24) clears, then
   the body of a query returning that same busy bit. */
452 USE_IOP_MMIO_HWPORT();
454 while ( iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000 )
/* --- next fragment (separate function) --- */
460 USE_IOP_MMIO_HWPORT();
462 return iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000;
/*
 * Start a normal-mode transfer on SIF0 (dmac2 channel 2).
 * NOTE(review): truncated -- braces, the v5 block-count computation for
 * both the aligned and unaligned cases, and any use of `mode` were lost
 * in extraction. Text preserved byte-for-byte; comments only.
 */
465void sceSifDma0Transfer(
void *addr,
int size,
int mode)
469 USE_IOP_MMIO_HWPORT();
470 USE_SIF_MMIO_HWPORT();
/* Size in words, rounded up. */
474 v4 = ((
unsigned int)size >> 2) + ((size & 3) != 0);
475 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
476 sif_mmio_hwport->controlreg = 32;
477 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
478 iop_mmio_hwport->dmac2.newch[2].madr = (
unsigned int)addr & 0xFFFFFF;
/* Word count not a multiple of 32: branch body (v5 setup) lost. */
479 if ( (v4 & 0x1F) != 0 )
483 iop_mmio_hwport->dmac2.newch[2].bcr = ((v5 & 0xFFFF) << 16) | 32;
484 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000201;
/* Fragment: busy-wait until SIF0 (dmac2 ch2) goes idle -- presumably the
   Dma0 wait routine; signature and loop body lost in extraction. */
489 USE_IOP_MMIO_HWPORT();
491 while ( iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000 )
495int sceSifDma0Sending()
497 USE_IOP_MMIO_HWPORT();
499 return iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000;
/*
 * Start a transfer on SIF1 (dmac2 channel 3); `mode` bit 4 selects a
 * chcr variant (v6 computation lost).
 * NOTE(review): truncated -- braces, v5/v6 computations and the else
 * branches were lost in extraction. Text preserved byte-for-byte.
 */
502void sceSifDma1Transfer(
void *addr,
int size,
int mode)
507 USE_IOP_MMIO_HWPORT();
508 USE_SIF_MMIO_HWPORT();
/* Size in words, rounded up. */
510 v4 = ((
unsigned int)size >> 2) + ((size & 3) != 0);
511 if ( (sif_mmio_hwport->controlreg & 0x40) == 0 )
512 sif_mmio_hwport->controlreg = 64;
513 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
514 iop_mmio_hwport->dmac2.newch[3].madr = (
unsigned int)addr & 0xFFFFFF;
519 if ( (v4 & 0x1F) != 0 )
519 iop_mmio_hwport->dmac2.newch[3].bcr = ((v5 & 0xFFFF) << 16) | 32;
520 if ( (mode & 0x10) != 0 )
524 iop_mmio_hwport->dmac2.newch[3].chcr = v6 | 0x200;
/* --- fragment: busy-wait for SIF1 idle (enclosing signature lost) --- */
529 USE_IOP_MMIO_HWPORT();
531 while ( iop_mmio_hwport->dmac2.newch[3].chcr & 0x1000000 )
535int sceSifDma1Sending()
537 USE_IOP_MMIO_HWPORT();
539 return iop_mmio_hwport->dmac2.newch[3].chcr & 0x1000000;
/*
 * Start a transfer on the legacy DMA2 path (dmac1 old channel 2);
 * `mode` bits 0 and 4 select chcr variants (v7 computation lost).
 * NOTE(review): truncated -- braces and the v5/v6/v7 computations were
 * lost in extraction. Text preserved byte-for-byte; comments only.
 */
542void sceSifDma2Transfer(
void *addr,
int size,
int mode)
548 USE_IOP_MMIO_HWPORT();
549 USE_SIF_MMIO_HWPORT();
/* Size in words, rounded up. */
551 v4 = ((
unsigned int)size >> 2) + ((size & 3) != 0);
552 if ( (sif_mmio_hwport->controlreg & 0x80) == 0 )
553 sif_mmio_hwport->controlreg = 128;
555 iop_mmio_hwport->dmac1.oldch[2].chcr = 0;
556 iop_mmio_hwport->dmac1.oldch[2].madr = (
unsigned int)addr & 0xFFFFFF;
559 if ( (v4 & 0x1F) != 0 )
563 iop_mmio_hwport->dmac1.oldch[2].bcr = ((v6 & 0xFFFF) << 16) | (v5 & 0xFFFF);
564 if ( (mode & 1) != 0 )
572 if ( (mode & 0x10) != 0 )
578 iop_mmio_hwport->dmac1.oldch[2].chcr = v7;
/* --- fragment: busy-wait for DMA2 idle (enclosing signature lost) --- */
583 USE_IOP_MMIO_HWPORT();
585 while ( iop_mmio_hwport->dmac1.oldch[2].chcr & 0x1000000 )
589int sceSifDma2Sending()
591 USE_IOP_MMIO_HWPORT();
593 return iop_mmio_hwport->dmac1.oldch[2].chcr & 0x1000000;
/*
 * Write `val` into the main-side SIF flag register.
 * NOTE(review): the function tail (the u32 return value -- likely a
 * readback of the flag -- and the closing brace) was lost in extraction;
 * confirm against the full source before reconstructing.
 */
601u32 sceSifSetMSFlag(u32 val)
603 USE_SIF_MMIO_HWPORT();
605 sif_mmio_hwport->msflag = val;
/*
 * Write `val` into the sub-side SIF flag register.
 * NOTE(review): the function tail (the u32 return value -- likely a
 * readback of the flag -- and the closing brace) was lost in extraction;
 * confirm against the full source before reconstructing.
 */
614u32 sceSifSetSMFlag(u32 val)
616 USE_SIF_MMIO_HWPORT();
618 sif_mmio_hwport->smflag = val;
622u32 sceSifGetMainAddr()
624 USE_SIF_MMIO_HWPORT();
626 return sif_mmio_hwport->mscom;
629u32 sceSifGetSubAddr()
631 USE_SIF_MMIO_HWPORT();
633 return sif_mmio_hwport->smcom;
636u32 sceSifSetSubAddr(u32 addr)
638 USE_SIF_MMIO_HWPORT();
640 sif_mmio_hwport->smcom = addr;
641 return sif_mmio_hwport->smcom;
/* Fragment: pulses SBUS control bit 1 (set then clear) -- presumably the
   routine that signals/interrupts the main CPU; the enclosing signature
   and any intervening lines were lost in extraction. */
647 USE_IOP_MMIO_HWPORT();
649 v0 = iop_mmio_hwport->iop_sbus_ctrl[0];
650 iop_mmio_hwport->iop_sbus_ctrl[0] = v0 | 2;
652 iop_mmio_hwport->iop_sbus_ctrl[0] = v0 & 0xFFFFFFFD;
int CpuResumeIntr(int state)
int ReleaseIntrHandler(int irq)
int QueryIntrContext(void)
int DisableIntr(int irq, int *res)
int CpuSuspendIntr(int *state)
int RegisterIntrHandler(int irq, int mode, int(*handler)(void *arg), void *arg)
u32 count
start sector of fragmented bd/file