/*
 * dim2_hal.c - DIM2 HAL implementation
 * (MediaLB, Device Interface Macro IP, OS62420)
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */

#include "dim2_hal.h"
#include "dim2_errors.h"
#include "dim2_reg.h"
#include <linux/stddef.h>
#include <linux/kernel.h>

/*
 * Size factor for isochronous DBR buffer.
 * Minimal value is 3.
 */
#define ISOC_DBR_FACTOR 3u

/*
 * Number of 32-bit units for DBR map.
 *
 * 1: block size is 512, max allocation is 16K
 * 2: block size is 256, max allocation is 8K
 * 4: block size is 128, max allocation is 4K
 * 8: block size is 64, max allocation is 2K
 *
 * Min allocated space is block size.
 * Max possible allocated space is 32 blocks.
 */
#define DBR_MAP_SIZE 2

/* -------------------------------------------------------------------------- */
/* not configurable area */

#define CDT 0x00
#define ADT 0x40

#define MLB_CAT 0x80
#define AHB_CAT 0x88

#define DBR_SIZE (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)

#define ROUND_UP_TO(x, d) (DIV_ROUND_UP(x, (d)) * (d))

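/*
 * Sanity check of the sizes above: with DBR_MAP_SIZE == 2,
 * DBR_BLOCK_SIZE = 16384 / 32 / 2 = 256 bytes, so each of the two 32-bit
 * map words covers 32 blocks and the largest single allocation is
 * 32 * 256 = 8K, matching the "2:" row of the DBR map table.
 */
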
/* -------------------------------------------------------------------------- */
/* generic helper functions and macros */

static inline u32 bit_mask(u8 position)
{
        return (u32)1 << position;
}

static inline bool dim_on_error(u8 error_id, const char *error_message)
{
        dimcb_on_error(error_id, error_message);
        return false;
}

/* -------------------------------------------------------------------------- */
/* types and local variables */

struct async_tx_dbr {
        u8 ch_addr;
        u16 rpc;
        u16 wpc;
        u16 rest_size;
        u16 sz_queue[CDT0_RPC_MASK + 1];
};

struct lld_global_vars_t {
        bool dim_is_initialized;
        bool mcm_is_initialized;
        struct dim2_regs __iomem *dim2; /* DIM2 core base address */
        struct async_tx_dbr atx_dbr;
        u32 fcnt;
        u32 dbr_map[DBR_MAP_SIZE];
};

static struct lld_global_vars_t g = { false };

/* -------------------------------------------------------------------------- */

static int dbr_get_mask_size(u16 size)
{
        int i;

        for (i = 0; i < 6; i++)
                if (size <= (DBR_BLOCK_SIZE << i))
                        return 1 << i;
        return 0;
}

/**
 * Allocates DBR memory.
 * @param size Requested memory size.
 * @return Offset in DBR memory on success, or DBR_SIZE if out of memory.
 */
static int alloc_dbr(u16 size)
{
        int mask_size;
        int i, block_idx = 0;

        if (size <= 0)
                return DBR_SIZE; /* out of memory */

        mask_size = dbr_get_mask_size(size);
        if (mask_size == 0)
                return DBR_SIZE; /* out of memory */

        for (i = 0; i < DBR_MAP_SIZE; i++) {
                u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
                u32 mask = ~((~(u32)0) << blocks);

                do {
                        if ((g.dbr_map[i] & mask) == 0) {
                                g.dbr_map[i] |= mask;
                                return block_idx * DBR_BLOCK_SIZE;
                        }
                        block_idx += mask_size;
                        /* do shift left in two steps in case mask_size == 32 */
                        mask <<= mask_size - 1;
                } while ((mask <<= 1) != 0);
        }

        return DBR_SIZE; /* out of memory */
}

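/*
 * Worked example for alloc_dbr(): a request of 600 bytes gives
 * blocks = DIV_ROUND_UP(600, 256) = 3 and mask_size = dbr_get_mask_size(600)
 * = 4 (600 fits into 256 << 2), so a 3-bit mask is slid through each map
 * word in 4-block steps and the first free position yields a 1K-aligned
 * offset.
 */
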
static void free_dbr(int offs, int size)
{
        int block_idx = offs / DBR_BLOCK_SIZE;
        u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
        u32 mask = ~((~(u32)0) << blocks);

        mask <<= block_idx % 32;
        g.dbr_map[block_idx / 32] &= ~mask;
}

/* -------------------------------------------------------------------------- */

static void dim2_transfer_madr(u32 val)
{
        dimcb_io_write(&g.dim2->MADR, val);

        /* wait for transfer completion */
        while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
                continue;

        dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
}

static void dim2_clear_dbr(u16 addr, u16 size)
{
        enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };

        u16 const end_addr = addr + size;
        u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);

        dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
        dimcb_io_write(&g.dim2->MDAT0, 0);

        for (; addr < end_addr; addr++)
                dim2_transfer_madr(cmd | addr);
}

static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
{
        dim2_transfer_madr(ctr_addr);

        return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
}

static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
{
        enum { MADR_WNR_BIT = 31 };

        dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */

        if (mask[0] != 0)
                dimcb_io_write(&g.dim2->MDAT0, value[0]);
        if (mask[1] != 0)
                dimcb_io_write(&g.dim2->MDAT1, value[1]);
        if (mask[2] != 0)
                dimcb_io_write(&g.dim2->MDAT2, value[2]);
        if (mask[3] != 0)
                dimcb_io_write(&g.dim2->MDAT3, value[3]);

        dimcb_io_write(&g.dim2->MDWE0, mask[0]);
        dimcb_io_write(&g.dim2->MDWE1, mask[1]);
        dimcb_io_write(&g.dim2->MDWE2, mask[2]);
        dimcb_io_write(&g.dim2->MDWE3, mask[3]);

        dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
}

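/*
 * As the code above implies, MDAT0..MDAT3 carry the 128-bit CTR word and
 * MDWE0..MDWE3 act as per-bit write-enable masks, so only bits set in
 * mask[] are modified by the write transfer triggered through MADR.
 */
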
static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
{
        u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

        dim2_write_ctr_mask(ctr_addr, mask, value);
}

static inline void dim2_clear_ctr(u32 ctr_addr)
{
        u32 const value[4] = { 0, 0, 0, 0 };

        dim2_write_ctr(ctr_addr, value);
}

static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
                               bool read_not_write)
{
        bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
        bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
        u16 const cat =
                (read_not_write << CAT_RNW_BIT) |
                (ch_type << CAT_CT_SHIFT) |
                (ch_addr << CAT_CL_SHIFT) |
                (isoc_fce << CAT_FCE_BIT) |
                (sync_mfe << CAT_MFE_BIT) |
                (false << CAT_MT_BIT) |
                (true << CAT_CE_BIT);
        u8 const ctr_addr = cat_base + ch_addr / 8;
        u8 const idx = (ch_addr % 8) / 2;
        u8 const shift = (ch_addr % 2) * 16;
        u32 mask[4] = { 0, 0, 0, 0 };
        u32 value[4] = { 0, 0, 0, 0 };

        mask[idx] = (u32)0xFFFF << shift;
        value[idx] = cat << shift;
        dim2_write_ctr_mask(ctr_addr, mask, value);
}

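/*
 * CAT addressing, derived from the arithmetic above: each CAT entry is
 * 16 bits wide and one 128-bit CTR line holds 8 entries, so ch_addr / 8
 * selects the CTR line, (ch_addr % 8) / 2 the 32-bit word within it, and
 * (ch_addr % 2) * 16 the half-word inside that word.
 */
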
static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
{
        u8 const ctr_addr = cat_base + ch_addr / 8;
        u8 const idx = (ch_addr % 8) / 2;
        u8 const shift = (ch_addr % 2) * 16;
        u32 mask[4] = { 0, 0, 0, 0 };
        u32 value[4] = { 0, 0, 0, 0 };

        mask[idx] = (u32)0xFFFF << shift;
        dim2_write_ctr_mask(ctr_addr, mask, value);
}

static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
                               u16 packet_length)
{
        u32 cdt[4] = { 0, 0, 0, 0 };

        if (packet_length)
                cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);

        cdt[3] =
                ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
                (dbr_address << CDT3_BA_SHIFT);
        dim2_write_ctr(CDT + ch_addr, cdt);
}

static u16 dim2_rpc(u8 ch_addr)
{
        u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);

        return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
}

static void dim2_clear_cdt(u8 ch_addr)
{
        u32 cdt[4] = { 0, 0, 0, 0 };

        dim2_write_ctr(CDT + ch_addr, cdt);
}

static void dim2_configure_adt(u8 ch_addr)
{
        u32 adt[4] = { 0, 0, 0, 0 };

        adt[0] =
                (true << ADT0_CE_BIT) |
                (true << ADT0_LE_BIT) |
                (0 << ADT0_PG_BIT);

        dim2_write_ctr(ADT + ch_addr, adt);
}

static void dim2_clear_adt(u8 ch_addr)
{
        u32 adt[4] = { 0, 0, 0, 0 };

        dim2_write_ctr(ADT + ch_addr, adt);
}

static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
                                  u16 buffer_size)
{
        u8 const shift = idx * 16;

        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt[4] = { 0, 0, 0, 0 };

        mask[1] =
                bit_mask(ADT1_PS_BIT + shift) |
                bit_mask(ADT1_RDY_BIT + shift) |
                (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
        adt[1] =
                (true << (ADT1_PS_BIT + shift)) |
                (true << (ADT1_RDY_BIT + shift)) |
                ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

        mask[idx + 2] = 0xFFFFFFFF;
        adt[idx + 2] = buf_addr;

        dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}

static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
                                 u16 buffer_size)
{
        u8 const shift = idx * 16;

        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt[4] = { 0, 0, 0, 0 };

        mask[1] =
                bit_mask(ADT1_RDY_BIT + shift) |
                (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
        adt[1] =
                (true << (ADT1_RDY_BIT + shift)) |
                ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

        mask[idx + 2] = 0xFFFFFFFF;
        adt[idx + 2] = buf_addr;

        dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}

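/*
 * Both start routines above program one of the two slots of a
 * double-buffered ADT entry: idx (0 or 1) selects the 16-bit half of ADT1
 * holding the flags and buffer-depth field, word idx + 2 takes the DMA
 * buffer address, and setting the RDY bit marks the descriptor ready for
 * the hardware.
 */
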
static void dim2_clear_ctram(void)
{
        u32 ctr_addr;

        for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
                dim2_clear_ctr(ctr_addr);
}

static void dim2_configure_channel(
        u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
        u16 packet_length)
{
        dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
        dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);

        dim2_configure_adt(ch_addr);
        dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);

        /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
        dimcb_io_write(&g.dim2->ACMR0,
                       dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
}

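/*
 * Note the mirrored read_not_write sense above: for a tx channel the MLB
 * side reads from the DBR (is_tx ? 1 : 0) while the AHB side writes into
 * it (is_tx ? 0 : 1), and vice versa for rx.
 */
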
static void dim2_clear_channel(u8 ch_addr)
{
        /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
        dimcb_io_write(&g.dim2->ACMR0,
                       dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));

        dim2_clear_cat(AHB_CAT, ch_addr);
        dim2_clear_adt(ch_addr);

        dim2_clear_cat(MLB_CAT, ch_addr);
        dim2_clear_cdt(ch_addr);

        /* clear channel status bit */
        dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
}

/* -------------------------------------------------------------------------- */
/* trace async tx dbr fill state */

static inline u16 norm_pc(u16 pc)
{
        return pc & CDT0_RPC_MASK;
}

static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
{
        g.atx_dbr.rest_size = dbr_size;
        g.atx_dbr.rpc = dim2_rpc(ch_addr);
        g.atx_dbr.wpc = g.atx_dbr.rpc;
}

static void dbrcnt_enq(int buf_sz)
{
        g.atx_dbr.rest_size -= buf_sz;
        g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
        g.atx_dbr.wpc++;
}

u16 dim_dbr_space(struct dim_channel *ch)
{
        u16 cur_rpc;
        struct async_tx_dbr *dbr = &g.atx_dbr;

        if (ch->addr != dbr->ch_addr)
                return 0xFFFF;

        cur_rpc = dim2_rpc(ch->addr);

        while (norm_pc(dbr->rpc) != cur_rpc) {
                dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
                dbr->rpc++;
        }

        if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
                return 0;

        return dbr->rest_size;
}

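/*
 * Accounting sketch: dbrcnt_enq() charges each queued tx buffer against
 * rest_size and records its size under the software write counter (wpc);
 * dim_dbr_space() credits the sizes back as the hardware read counter
 * (rpc, taken from CDT0) catches up, so rest_size tracks the free DBR
 * bytes of the async tx channel.
 */
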
/* -------------------------------------------------------------------------- */
/* channel state helpers */

static void state_init(struct int_ch_state *state)
{
        state->request_counter = 0;
        state->service_counter = 0;

        state->idx1 = 0;
        state->idx2 = 0;
        state->level = 0;
}

/* -------------------------------------------------------------------------- */
/* macro helper functions */

static inline bool check_channel_address(u32 ch_address)
{
        return ch_address > 0 && (ch_address % 2) == 0 &&
               (ch_address / 2) <= (u32)CAT_CL_MASK;
}

static inline bool check_packet_length(u32 packet_length)
{
        u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;

        if (packet_length <= 0)
                return false; /* too small */

        if (packet_length > max_size)
                return false; /* too big */

        if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
                return false; /* too big */

        return true;
}

static inline bool check_bytes_per_frame(u32 bytes_per_frame)
{
        u16 const bd_factor = g.fcnt + 2;
        u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;

        if (bytes_per_frame <= 0)
                return false; /* too small */

        if (bytes_per_frame > max_size)
                return false; /* too big */

        return true;
}

static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
{
        u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;

        if (buf_size > max_size)
                return max_size;

        return buf_size;
}

static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
        u16 n;
        u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;

        if (buf_size > max_size)
                buf_size = max_size;

        n = buf_size / packet_length;

        if (n < 2u)
                return 0; /* too small buffer for given packet_length */

        return packet_length * n;
}

static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
        u16 n;
        u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
        u32 const unit = bytes_per_frame << g.fcnt;

        if (buf_size > max_size)
                buf_size = max_size;

        n = buf_size / unit;

        if (n == 0)
                return 0; /* too small buffer for given bytes_per_frame */

        return unit * n;
}

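/*
 * Worked example for norm_sync_buffer_size(), assuming g.fcnt == 5:
 * with bytes_per_frame == 4 the unit is 4 << 5 = 128 bytes, so a requested
 * buf_size of 300 is rounded down to n = 2 units, i.e. 256 bytes.
 */
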
static void dim2_cleanup(void)
{
        /* disable MediaLB */
        dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);

        dim2_clear_ctram();

        /* disable mlb_int interrupt */
        dimcb_io_write(&g.dim2->MIEN, 0);

        /* clear status for all dma channels */
        dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
        dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);

        /* mask interrupts for all channels */
        dimcb_io_write(&g.dim2->ACMR0, 0);
        dimcb_io_write(&g.dim2->ACMR1, 0);
}

static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
{
        dim2_cleanup();

        /* configure and enable MediaLB */
        dimcb_io_write(&g.dim2->MLBC0,
                       enable_6pin << MLBC0_MLBPEN_BIT |
                       mlb_clock << MLBC0_MLBCLK_SHIFT |
                       g.fcnt << MLBC0_FCNT_SHIFT |
                       true << MLBC0_MLBEN_BIT);

        /* activate all HBI channels */
        dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
        dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);

        /* enable HBI */
        dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));

        /* configure DMA */
        dimcb_io_write(&g.dim2->ACTL,
                       ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
                       true << ACTL_SCE_BIT);
}

static bool dim2_is_mlb_locked(void)
{
        u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
        u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
                          bit_mask(MLBC1_LOCKERR_BIT);
        u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
        u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;

        dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
        return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
               (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
}

/* -------------------------------------------------------------------------- */
/* channel help routines */

static inline bool service_channel(u8 ch_addr, u8 idx)
{
        u8 const shift = idx * 16;
        u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt_w[4] = { 0, 0, 0, 0 };

        if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
                return false;

        mask[1] =
                bit_mask(ADT1_DNE_BIT + shift) |
                bit_mask(ADT1_ERR_BIT + shift) |
                bit_mask(ADT1_RDY_BIT + shift);
        dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);

        /* clear channel status bit */
        dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));

        return true;
}

/* -------------------------------------------------------------------------- */
/* channel init routines */

static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
{
        state_init(&ch->state);

        ch->addr = ch_addr;

        ch->packet_length = packet_length;
        ch->bytes_per_frame = 0;
        ch->done_sw_buffers_number = 0;
}

static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
{
        state_init(&ch->state);

        ch->addr = ch_addr;

        ch->packet_length = 0;
        ch->bytes_per_frame = bytes_per_frame;
        ch->done_sw_buffers_number = 0;
}

static void channel_init(struct dim_channel *ch, u8 ch_addr)
{
        state_init(&ch->state);

        ch->addr = ch_addr;

        ch->packet_length = 0;
        ch->bytes_per_frame = 0;
        ch->done_sw_buffers_number = 0;
}

/* returns true if channel interrupt state is cleared */
static bool channel_service_interrupt(struct dim_channel *ch)
{
        struct int_ch_state *const state = &ch->state;

        if (!service_channel(ch->addr, state->idx2))
                return false;

        state->idx2 ^= 1;
        state->request_counter++;
        return true;
}

static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
{
        struct int_ch_state *const state = &ch->state;

        if (buf_size <= 0)
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");

        if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
            buf_size != norm_ctrl_async_buffer_size(buf_size))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad control/async buffer size");

        if (ch->packet_length &&
            buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad isochronous buffer size");

        if (ch->bytes_per_frame &&
            buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad synchronous buffer size");

        if (state->level >= 2u)
                return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");

        ++state->level;

        if (ch->addr == g.atx_dbr.ch_addr)
                dbrcnt_enq(buf_size);

        if (ch->packet_length || ch->bytes_per_frame)
                dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
        else
                dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
                                      buf_size);
        state->idx1 ^= 1;

        return true;
}

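/*
 * state->level counts buffers currently owned by the hardware; together
 * with the idx1 toggle above it implements the two-entry pipeline matching
 * the double-buffered ADT, which is why a third pending buffer is rejected
 * as an overflow.
 */
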
static u8 channel_service(struct dim_channel *ch)
{
        struct int_ch_state *const state = &ch->state;

        if (state->service_counter != state->request_counter) {
                state->service_counter++;
                if (state->level == 0)
                        return DIM_ERR_UNDERFLOW;

                --state->level;
                ch->done_sw_buffers_number++;
        }

        return DIM_NO_ERROR;
}

static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
        if (buffers_number > ch->done_sw_buffers_number)
                return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");

        ch->done_sw_buffers_number -= buffers_number;
        return true;
}

/* -------------------------------------------------------------------------- */
/* API */

u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
               u32 fcnt)
{
        g.dim_is_initialized = false;

        if (!dim_base_address)
                return DIM_INIT_ERR_DIM_ADDR;

        /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
        /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
        if (mlb_clock >= 8)
                return DIM_INIT_ERR_MLB_CLOCK;

        if (fcnt > MLBC0_FCNT_MAX_VAL)
                return DIM_INIT_ERR_MLB_CLOCK;

        g.dim2 = dim_base_address;
        g.fcnt = fcnt;
        g.dbr_map[0] = 0;
        g.dbr_map[1] = 0;

        dim2_initialize(mlb_clock >= 3, mlb_clock);

        g.dim_is_initialized = true;

        return DIM_NO_ERROR;
}

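/*
 * Usage sketch (hypothetical values): dim_startup(regs, 3, fcnt) selects
 * 2048 fs per the clock table above; via dim2_initialize(mlb_clock >= 3,
 * mlb_clock) any clock of 2048 fs or faster also enables the 6-pin
 * MediaLB interface.
 */
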
void dim_shutdown(void)
{
        g.dim_is_initialized = false;
        dim2_cleanup();
}

bool dim_get_lock_state(void)
{
        return dim2_is_mlb_locked();
}

static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
                          u16 ch_address, u16 hw_buffer_size)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        channel_init(ch, ch_address / 2);

        dim2_configure_channel(ch->addr, type, is_tx,
                               ch->dbr_addr, ch->dbr_size, 0);

        return DIM_NO_ERROR;
}

void dim_service_mlb_int_irq(void)
{
        dimcb_io_write(&g.dim2->MS0, 0);
        dimcb_io_write(&g.dim2->MS1, 0);
}

u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
{
        return norm_ctrl_async_buffer_size(buf_size);
}

/**
 * Retrieves the maximal possible correct buffer size for the isochronous
 * data type that conforms to the given packet length and is not bigger
 * than the given buffer size.
 *
 * Returns the non-zero correct buffer size, or zero on error.
 */
u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
        if (!check_packet_length(packet_length))
                return 0;

        return norm_isoc_buffer_size(buf_size, packet_length);
}

/**
 * Retrieves the maximal possible correct buffer size for the synchronous
 * data type that conforms to the given bytes per frame and is not bigger
 * than the given buffer size.
 *
 * Returns the non-zero correct buffer size, or zero on error.
 */
u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
        if (!check_bytes_per_frame(bytes_per_frame))
                return 0;

        return norm_sync_buffer_size(buf_size, bytes_per_frame);
}

u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                    u16 max_buffer_size)
{
        return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
                               max_buffer_size);
}

u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                  u16 max_buffer_size)
{
        u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
                                 max_buffer_size);

        if (is_tx && !g.atx_dbr.ch_addr) {
                g.atx_dbr.ch_addr = ch->addr;
                dbrcnt_init(ch->addr, ch->dbr_size);
                dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
        }

        return ret;
}

u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                 u16 packet_length)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        if (!check_packet_length(packet_length))
                return DIM_ERR_BAD_CONFIG;

        ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        isoc_init(ch, ch_address / 2, packet_length);

        dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
                               ch->dbr_size, packet_length);

        return DIM_NO_ERROR;
}

u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                 u16 bytes_per_frame)
{
        u16 bd_factor = g.fcnt + 2;

        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        if (!check_bytes_per_frame(bytes_per_frame))
                return DIM_ERR_BAD_CONFIG;

        ch->dbr_size = bytes_per_frame << bd_factor;
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        sync_init(ch, ch_address / 2, bytes_per_frame);

        dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
        dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
                               ch->dbr_addr, ch->dbr_size, 0);

        return DIM_NO_ERROR;
}

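/*
 * DBR sizing recap: a sync channel reserves bytes_per_frame << (g.fcnt + 2)
 * bytes, i.e. four units of bytes_per_frame << g.fcnt as used by
 * norm_sync_buffer_size(), while an isochronous channel (above) reserves
 * ISOC_DBR_FACTOR = 3 packets' worth of DBR space.
 */
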
u8 dim_destroy_channel(struct dim_channel *ch)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (ch->addr == g.atx_dbr.ch_addr) {
                dimcb_io_write(&g.dim2->MIEN, 0);
                g.atx_dbr.ch_addr = 0;
        }

        dim2_clear_channel(ch->addr);
        if (ch->dbr_addr < DBR_SIZE)
                free_dbr(ch->dbr_addr, ch->dbr_size);
        ch->dbr_addr = DBR_SIZE;

        return DIM_NO_ERROR;
}

void dim_service_ahb_int_irq(struct dim_channel *const *channels)
{
        bool state_changed;

        if (!g.dim_is_initialized) {
                dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                             "DIM is not initialized");
                return;
        }

        if (!channels) {
                dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
                return;
        }

        /*
         * Use a while-loop and a flag to make sure the age is changed back
         * at least once, otherwise the interrupt may never come if the CPU
         * generates the interrupt on changing age.
         * This cycle runs not more times than the number of channels,
         * because channel_service_interrupt() does not start the channel
         * again.
         */
        do {
                struct dim_channel *const *ch = channels;

                state_changed = false;

                while (*ch) {
                        state_changed |= channel_service_interrupt(*ch);
                        ++ch;
                }
        } while (state_changed);
}

u8 dim_service_channel(struct dim_channel *ch)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        return channel_service(ch);
}

struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
                                             struct dim_ch_state_t *state_ptr)
{
        if (!ch || !state_ptr)
                return NULL;

        state_ptr->ready = ch->state.level < 2;
        state_ptr->done_buffers = ch->done_sw_buffers_number;

        return state_ptr;
}

bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
                        u16 buffer_size)
{
        if (!ch)
                return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                                    "Bad channel");

        return channel_start(ch, buffer_addr, buffer_size);
}

bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
        if (!ch)
                return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                                    "Bad channel");

        return channel_detach_buffers(ch, buffers_number);
}