/*
 * dim2_hal.c - DIM2 HAL implementation
 * (MediaLB, Device Interface Macro IP, OS62420)
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
#include "dim2_hal.h"
#include "dim2_errors.h"
#include "dim2_reg.h"
#include <linux/stddef.h>
/*
 * Size factor for isochronous DBR buffer.
 * Minimal value is 3.
 */
#define ISOC_DBR_FACTOR 3u
/*
 * Number of 32-bit units for DBR map.
 *
 * 1: block size is 512, max allocation is 16K
 * 2: block size is 256, max allocation is 8K
 * 4: block size is 128, max allocation is 4K
 * 8: block size is 64, max allocation is 2K
 *
 * Min allocated space is block size.
 * Max possible allocated space is 32 blocks.
 */
#define DBR_MAP_SIZE 2
/* -------------------------------------------------------------------------- */
/* not configurable area */

#define CDT 0x00
#define ADT 0x40

#define MLB_CAT 0x80
#define AHB_CAT 0x88

#define DBR_SIZE (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)

#define ROUND_UP_TO(x, d) (((x) + (d) - 1) / (d) * (d))
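
/*
 * For illustration (derived from the defines above, not part of the original
 * source): with DBR_MAP_SIZE == 2, DBR_BLOCK_SIZE is 16384 / 32 / 2 == 256
 * bytes, so the 16K DBR is tracked by 2 * 32 == 64 block bits and a single
 * allocation spans at most 32 blocks (8K). ROUND_UP_TO(x, d) rounds x up to
 * a multiple of d, e.g. ROUND_UP_TO(100, 256) evaluates to 256.
 */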
/* -------------------------------------------------------------------------- */
/* generic helper functions and macros */

static inline u32 bit_mask(u8 position)
{
	return (u32)1 << position;
}

static inline bool dim_on_error(u8 error_id, const char *error_message)
{
	dimcb_on_error(error_id, error_message);
	return false;
}
/* -------------------------------------------------------------------------- */
/* types and local variables */

struct async_tx_dbr {
	u8 ch_addr;
	u16 rpc;
	u16 wpc;
	u16 rest_size;
	u16 sz_queue[CDT0_RPC_MASK + 1];
};

struct lld_global_vars_t {
	bool dim_is_initialized;
	bool mcm_is_initialized;
	struct dim2_regs __iomem *dim2; /* DIM2 core base address */
	struct async_tx_dbr atx_dbr;
	u32 fcnt;
	u32 dbr_map[DBR_MAP_SIZE];
};

static struct lld_global_vars_t g = { false };
/* -------------------------------------------------------------------------- */

static int dbr_get_mask_size(u16 size)
{
	int i;

	for (i = 0; i < 6; i++)
		if (size <= (DBR_BLOCK_SIZE << i))
			return 1 << i;
	return 0;
}
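
/*
 * For illustration: dbr_get_mask_size() returns the allocation granularity
 * in blocks, rounded up to a power of two. With 256-byte blocks, a request
 * of 600 bytes fits into DBR_BLOCK_SIZE << 2 == 1024, so the result is
 * 1 << 2 == 4 blocks; a request above DBR_BLOCK_SIZE << 5 yields 0 (failure).
 */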
/**
 * Allocates DBR memory.
 * @param size Size of the memory to allocate.
 * @return Offset in DBR memory on success or DBR_SIZE if out of memory.
 */
static int alloc_dbr(u16 size)
{
	int mask_size;
	int i, block_idx = 0;

	if (size <= 0)
		return DBR_SIZE; /* out of memory */

	mask_size = dbr_get_mask_size(size);
	if (mask_size == 0)
		return DBR_SIZE; /* out of memory */

	for (i = 0; i < DBR_MAP_SIZE; i++) {
		u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
		u32 mask = ~((~(u32)0) << blocks);

		do {
			if ((g.dbr_map[i] & mask) == 0) {
				g.dbr_map[i] |= mask;
				return block_idx * DBR_BLOCK_SIZE;
			}
			block_idx += mask_size;
			/* do shift left with 2 steps in case mask_size == 32 */
			mask <<= mask_size - 1;
		} while ((mask <<= 1) != 0);
	}

	return DBR_SIZE; /* out of memory */
}
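
/*
 * For illustration, the typical pairing in this file (see init_ctrl_async(),
 * dim_init_isoc(), dim_init_sync() and dim_destroy_channel() below):
 *
 *	ch->dbr_addr = alloc_dbr(ch->dbr_size);
 *	if (ch->dbr_addr >= DBR_SIZE)
 *		return DIM_INIT_ERR_OUT_OF_MEMORY;
 *	...
 *	free_dbr(ch->dbr_addr, ch->dbr_size);
 *
 * The allocator scans each 32-bit map word with a first-fit bitmask that
 * advances in mask_size steps, so every allocation starts at a position
 * aligned to its own rounded-up size in blocks.
 */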
static void free_dbr(int offs, int size)
{
	int block_idx = offs / DBR_BLOCK_SIZE;
	u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
	u32 mask = ~((~(u32)0) << blocks);

	mask <<= block_idx % 32;
	g.dbr_map[block_idx / 32] &= ~mask;
}
/* -------------------------------------------------------------------------- */

static void dim2_transfer_madr(u32 val)
{
	dimcb_io_write(&g.dim2->MADR, val);

	/* wait for transfer completion */
	while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
		continue;

	dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
}
static void dim2_clear_dbr(u16 addr, u16 size)
{
	enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };

	u16 const end_addr = addr + size;
	u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);

	dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
	dimcb_io_write(&g.dim2->MDAT0, 0);

	for (; addr < end_addr; addr++)
		dim2_transfer_madr(cmd | addr);
}
static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
{
	dim2_transfer_madr(ctr_addr);

	return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
}
static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
{
	enum { MADR_WNR_BIT = 31 };

	dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */

	if (mask[0] != 0)
		dimcb_io_write(&g.dim2->MDAT0, value[0]);
	if (mask[1] != 0)
		dimcb_io_write(&g.dim2->MDAT1, value[1]);
	if (mask[2] != 0)
		dimcb_io_write(&g.dim2->MDAT2, value[2]);
	if (mask[3] != 0)
		dimcb_io_write(&g.dim2->MDAT3, value[3]);

	dimcb_io_write(&g.dim2->MDWE0, mask[0]);
	dimcb_io_write(&g.dim2->MDWE1, mask[1]);
	dimcb_io_write(&g.dim2->MDWE2, mask[2]);
	dimcb_io_write(&g.dim2->MDWE3, mask[3]);

	dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
}
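
/*
 * For illustration: CTR access is indirect. A write loads up to four 32-bit
 * data words (MDAT0..MDAT3) plus per-bit write-enable masks (MDWE0..MDWE3),
 * then triggers the transfer by writing the CTR address with the WNR bit set
 * to MADR; dim2_transfer_madr() polls MCTL until the hardware signals
 * completion. A read triggers the transfer first and then picks the wanted
 * word out of MDAT0..MDAT3.
 */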
static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
{
	u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	dim2_write_ctr_mask(ctr_addr, mask, value);
}
static inline void dim2_clear_ctr(u32 ctr_addr)
{
	u32 const value[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(ctr_addr, value);
}
static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
			       bool read_not_write, bool sync_mfe)
{
	u16 const cat =
		(read_not_write << CAT_RNW_BIT) |
		(ch_type << CAT_CT_SHIFT) |
		(ch_addr << CAT_CL_SHIFT) |
		(sync_mfe << CAT_MFE_BIT) |
		(false << CAT_MT_BIT) |
		(true << CAT_CE_BIT);
	u8 const ctr_addr = cat_base + ch_addr / 8;
	u8 const idx = (ch_addr % 8) / 2;
	u8 const shift = (ch_addr % 2) * 16;
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 value[4] = { 0, 0, 0, 0 };

	mask[idx] = (u32)0xFFFF << shift;
	value[idx] = cat << shift;
	dim2_write_ctr_mask(ctr_addr, mask, value);
}
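
/*
 * For illustration: a CAT entry is 16 bits and one CTR line holds eight of
 * them in four 32-bit words. For ch_addr == 11: ctr_addr == cat_base + 1,
 * idx == (11 % 8) / 2 == 1 and shift == 16, i.e. the entry occupies the
 * upper half of word 1 of that line; the write-enable mask touches only
 * those 16 bits, leaving the neighboring entries untouched.
 */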
static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
{
	u8 const ctr_addr = cat_base + ch_addr / 8;
	u8 const idx = (ch_addr % 8) / 2;
	u8 const shift = (ch_addr % 2) * 16;
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 value[4] = { 0, 0, 0, 0 };

	mask[idx] = (u32)0xFFFF << shift;
	dim2_write_ctr_mask(ctr_addr, mask, value);
}
static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
			       u16 packet_length)
{
	u32 cdt[4] = { 0, 0, 0, 0 };

	if (packet_length)
		cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);

	cdt[3] =
		((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
		(dbr_address << CDT3_BA_SHIFT);
	dim2_write_ctr(CDT + ch_addr, cdt);
}
static u16 dim2_rpc(u8 ch_addr)
{
	u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);

	return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
}
static void dim2_clear_cdt(u8 ch_addr)
{
	u32 cdt[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(CDT + ch_addr, cdt);
}
static void dim2_configure_adt(u8 ch_addr)
{
	u32 adt[4] = { 0, 0, 0, 0 };

	adt[0] =
		(true << ADT0_CE_BIT) |
		(true << ADT0_LE_BIT);

	dim2_write_ctr(ADT + ch_addr, adt);
}
static void dim2_clear_adt(u8 ch_addr)
{
	u32 adt[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(ADT + ch_addr, adt);
}
static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
				  u16 buffer_size)
{
	u8 const shift = idx * 16;

	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt[4] = { 0, 0, 0, 0 };

	mask[1] =
		bit_mask(ADT1_PS_BIT + shift) |
		bit_mask(ADT1_RDY_BIT + shift) |
		(ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
	adt[1] =
		(true << (ADT1_PS_BIT + shift)) |
		(true << (ADT1_RDY_BIT + shift)) |
		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

	mask[idx + 2] = 0xFFFFFFFF;
	adt[idx + 2] = buf_addr;

	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}
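
/*
 * For illustration: each ADT line describes two buffer "pages" and idx
 * selects the page. The per-page control fields share word 1 (page 0 in the
 * low 16 bits, page 1 in the high 16 bits, hence shift == idx * 16), while
 * the buffer addresses live in words 2 and 3 (hence adt[idx + 2] = buf_addr).
 * Setting RDY hands the page over to the hardware.
 */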
static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
				 u16 buffer_size)
{
	u8 const shift = idx * 16;

	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt[4] = { 0, 0, 0, 0 };

	mask[1] =
		bit_mask(ADT1_RDY_BIT + shift) |
		(ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
	adt[1] =
		(true << (ADT1_RDY_BIT + shift)) |
		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

	mask[idx + 2] = 0xFFFFFFFF;
	adt[idx + 2] = buf_addr;

	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}
static void dim2_clear_ctram(void)
{
	u32 ctr_addr;

	for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
		dim2_clear_ctr(ctr_addr);
}
static void dim2_configure_channel(
	u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
	u16 packet_length, bool sync_mfe)
{
	dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0, sync_mfe);

	dim2_configure_adt(ch_addr);
	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1, sync_mfe);

	/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
	dimcb_io_write(&g.dim2->ACMR0,
		       dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
}
static void dim2_clear_channel(u8 ch_addr)
{
	/* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
	dimcb_io_write(&g.dim2->ACMR0,
		       dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));

	dim2_clear_cat(AHB_CAT, ch_addr);
	dim2_clear_adt(ch_addr);

	dim2_clear_cat(MLB_CAT, ch_addr);
	dim2_clear_cdt(ch_addr);

	/* clear channel status bit */
	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
}
/* -------------------------------------------------------------------------- */
/* trace async tx dbr fill state */

static inline u16 norm_pc(u16 pc)
{
	return pc & CDT0_RPC_MASK;
}

static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
{
	g.atx_dbr.rest_size = dbr_size;
	g.atx_dbr.rpc = dim2_rpc(ch_addr);
	g.atx_dbr.wpc = g.atx_dbr.rpc;
}

static void dbrcnt_enq(int buf_sz)
{
	g.atx_dbr.rest_size -= buf_sz;
	g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
	g.atx_dbr.wpc++;
}

u16 dim_dbr_space(struct dim_channel *ch)
{
	u16 cur_rpc;
	struct async_tx_dbr *dbr = &g.atx_dbr;

	if (ch->addr != dbr->ch_addr)
		return 0xFFFF;

	cur_rpc = dim2_rpc(ch->addr);

	while (norm_pc(dbr->rpc) != cur_rpc) {
		dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
		dbr->rpc++;
	}

	if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
		return 0;

	return dbr->rest_size;
}
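
/*
 * For illustration: wpc/rpc are free-running software packet counters; only
 * their low bits (norm_pc()) are compared with the hardware read packet
 * counter from CDT word 0. Enqueuing a buffer records its size under the
 * write counter and debits rest_size; dim_dbr_space() later credits back the
 * sizes of all packets the hardware has consumed since the last call. The
 * unsigned difference wpc - rpc stays correct across 16-bit wrap-around.
 */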
/* -------------------------------------------------------------------------- */
/* channel state helpers */

static void state_init(struct int_ch_state *state)
{
	state->request_counter = 0;
	state->service_counter = 0;

	state->idx1 = 0;
	state->idx2 = 0;
	state->level = 0;
}
/* -------------------------------------------------------------------------- */
/* macro helper functions */

static inline bool check_channel_address(u32 ch_address)
{
	return ch_address > 0 && (ch_address % 2) == 0 &&
	       (ch_address / 2) <= (u32)CAT_CL_MASK;
}

static inline bool check_packet_length(u32 packet_length)
{
	u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;

	if (packet_length <= 0)
		return false; /* too small */

	if (packet_length > max_size)
		return false; /* too big */

	if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
		return false; /* too big */

	return true;
}
static inline bool check_bytes_per_frame(u32 bytes_per_frame)
{
	u16 const bd_factor = g.fcnt + 2;
	u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;

	if (bytes_per_frame <= 0)
		return false; /* too small */

	if (bytes_per_frame > max_size)
		return false; /* too big */

	return true;
}
static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
{
	u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;

	if (buf_size > max_size)
		return max_size;

	return buf_size;
}
static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
	u16 n;
	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;

	if (buf_size > max_size)
		buf_size = max_size;

	n = buf_size / packet_length;

	if (n < 2u)
		return 0; /* too small buffer for given packet_length */

	return packet_length * n;
}
static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
	u16 n;
	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
	u32 const unit = bytes_per_frame << g.fcnt;

	if (buf_size > max_size)
		buf_size = max_size;

	n = buf_size / unit;

	if (n == 0)
		return 0; /* too small buffer for given bytes_per_frame */

	return unit * n;
}
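
/*
 * For illustration (the numbers are hypothetical): with g.fcnt == 5 and
 * bytes_per_frame == 4, unit == 4 << 5 == 128 bytes, so a requested buf_size
 * of 1000 is normalized down to 7 * 128 == 896 bytes, the largest whole
 * number of sub-buffer units that fits.
 */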
static void dim2_cleanup(void)
{
	/* disable MediaLB */
	dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);

	dim2_clear_ctram();

	/* disable mlb_int interrupt */
	dimcb_io_write(&g.dim2->MIEN, 0);

	/* clear status for all dma channels */
	dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
	dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);

	/* mask interrupts for all channels */
	dimcb_io_write(&g.dim2->ACMR0, 0);
	dimcb_io_write(&g.dim2->ACMR1, 0);
}
static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
{
	dim2_cleanup();

	/* configure and enable MediaLB */
	dimcb_io_write(&g.dim2->MLBC0,
		       enable_6pin << MLBC0_MLBPEN_BIT |
		       mlb_clock << MLBC0_MLBCLK_SHIFT |
		       g.fcnt << MLBC0_FCNT_SHIFT |
		       true << MLBC0_MLBEN_BIT);

	/* activate all HBI channels */
	dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
	dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);

	/* enable HBI */
	dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));

	/* configure DMA */
	dimcb_io_write(&g.dim2->ACTL,
		       ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
		       true << ACTL_SCE_BIT);
}
static bool dim2_is_mlb_locked(void)
{
	u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
	u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
			  bit_mask(MLBC1_LOCKERR_BIT);
	u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
	u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;

	dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
	return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
	       (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
}
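
/*
 * For illustration: the write back to MLBC1 keeps only the node-address
 * field and thereby clears the sticky CLKMERR/LOCKERR status bits; "locked"
 * is then reported only if those error bits stay clear on re-read and MLBC0
 * still has the MLBLK lock bit set.
 */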
/* -------------------------------------------------------------------------- */
/* channel help routines */

static inline bool service_channel(u8 ch_addr, u8 idx)
{
	u8 const shift = idx * 16;
	u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt_w[4] = { 0, 0, 0, 0 };

	if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
		return false;

	mask[1] =
		bit_mask(ADT1_DNE_BIT + shift) |
		bit_mask(ADT1_ERR_BIT + shift) |
		bit_mask(ADT1_RDY_BIT + shift);
	dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);

	/* clear channel status bit */
	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));

	return true;
}
/* -------------------------------------------------------------------------- */
/* channel init routines */

static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = packet_length;
	ch->bytes_per_frame = 0;
	ch->done_sw_buffers_number = 0;
}

static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = 0;
	ch->bytes_per_frame = bytes_per_frame;
	ch->done_sw_buffers_number = 0;
}

static void channel_init(struct dim_channel *ch, u8 ch_addr)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = 0;
	ch->bytes_per_frame = 0;
	ch->done_sw_buffers_number = 0;
}
/* returns true if channel interrupt state is cleared */
static bool channel_service_interrupt(struct dim_channel *ch)
{
	struct int_ch_state *const state = &ch->state;

	if (!service_channel(ch->addr, state->idx2))
		return false;

	state->idx2 ^= 1;
	state->request_counter++;
	return true;
}
static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
{
	struct int_ch_state *const state = &ch->state;

	if (buf_size <= 0)
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");

	if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
	    buf_size != norm_ctrl_async_buffer_size(buf_size))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad control/async buffer size");

	if (ch->packet_length &&
	    buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad isochronous buffer size");

	if (ch->bytes_per_frame &&
	    buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad synchronous buffer size");

	if (state->level >= 2u)
		return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");

	++state->level;

	if (ch->addr == g.atx_dbr.ch_addr)
		dbrcnt_enq(buf_size);

	if (ch->packet_length || ch->bytes_per_frame)
		dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
	else
		dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
				      buf_size);
	state->idx1 ^= 1;

	return true;
}
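
/*
 * For illustration: state->level counts buffers the hardware currently owns
 * (at most 2, one per ADT page); idx1 is the page the next buffer is started
 * on and idx2 the page expected to complete next. Both toggle between 0 and
 * 1, which gives simple software double buffering on top of the two-page ADT
 * entry.
 */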
static u8 channel_service(struct dim_channel *ch)
{
	struct int_ch_state *const state = &ch->state;

	if (state->service_counter != state->request_counter) {
		state->service_counter++;
		if (state->level == 0)
			return DIM_ERR_UNDERFLOW;

		--state->level;
		ch->done_sw_buffers_number++;
	}

	return DIM_NO_ERROR;
}
static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
	if (buffers_number > ch->done_sw_buffers_number)
		return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");

	ch->done_sw_buffers_number -= buffers_number;
	return true;
}
/* -------------------------------------------------------------------------- */
/* API */

u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
	       u8 fcnt)
{
	g.dim_is_initialized = false;

	if (!dim_base_address)
		return DIM_INIT_ERR_DIM_ADDR;

	/* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
	/* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
	if (mlb_clock >= 8)
		return DIM_INIT_ERR_MLB_CLOCK;

	if (fcnt > MLBC0_FCNT_MAX_VAL)
		return DIM_INIT_ERR_MLB_CLOCK;

	g.dim2 = dim_base_address;
	g.fcnt = fcnt;
	g.dbr_map[0] = 0;
	g.dbr_map[1] = 0;

	dim2_initialize(mlb_clock >= 3, mlb_clock);

	g.dim_is_initialized = true;

	return DIM_NO_ERROR;
}
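
/*
 * For illustration, a minimal bring-up sequence sketched from this API
 * (error handling elided; io_base, clk_speed and the channel parameters are
 * hypothetical caller-side values, not names from this file):
 *
 *	dim_startup(io_base, clk_speed, fcnt);
 *	dim_init_async(&ch, is_tx, ch_address, max_buffer_size);
 *	dim_enqueue_buffer(&ch, buffer_addr, buffer_size);
 *	...
 *	dim_destroy_channel(&ch);
 *	dim_shutdown();
 */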
void dim_shutdown(void)
{
	g.dim_is_initialized = false;

	dim2_cleanup();
}
bool dim_get_lock_state(void)
{
	return dim2_is_mlb_locked();
}
static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
			  u16 ch_address, u16 hw_buffer_size)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	channel_init(ch, ch_address / 2);

	dim2_configure_channel(ch->addr, type, is_tx,
			       ch->dbr_addr, ch->dbr_size, 0, false);

	return DIM_NO_ERROR;
}
void dim_service_mlb_int_irq(void)
{
	dimcb_io_write(&g.dim2->MS0, 0);
	dimcb_io_write(&g.dim2->MS1, 0);
}
u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
{
	return norm_ctrl_async_buffer_size(buf_size);
}
/**
 * Retrieves the maximal possible correct buffer size for the isochronous
 * data type that conforms to the given packet length and is not bigger than
 * the given buffer size.
 *
 * Returns the non-zero correct buffer size or zero on error.
 */
u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
	if (!check_packet_length(packet_length))
		return 0;

	return norm_isoc_buffer_size(buf_size, packet_length);
}
/**
 * Retrieves the maximal possible correct buffer size for the synchronous
 * data type that conforms to the given bytes per frame and is not bigger
 * than the given buffer size.
 *
 * Returns the non-zero correct buffer size or zero on error.
 */
u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
	if (!check_bytes_per_frame(bytes_per_frame))
		return 0;

	return norm_sync_buffer_size(buf_size, bytes_per_frame);
}
u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		    u16 max_buffer_size)
{
	return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
			       max_buffer_size);
}
u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		  u16 max_buffer_size)
{
	u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
				 max_buffer_size);

	if (is_tx && !g.atx_dbr.ch_addr) {
		g.atx_dbr.ch_addr = ch->addr;
		dbrcnt_init(ch->addr, ch->dbr_size);
		dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
	}

	return ret;
}
u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		 u16 packet_length)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	if (!check_packet_length(packet_length))
		return DIM_ERR_BAD_CONFIG;

	ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	isoc_init(ch, ch_address / 2, packet_length);

	dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
			       ch->dbr_size, packet_length, false);

	return DIM_NO_ERROR;
}
u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		 u16 bytes_per_frame)
{
	u16 bd_factor = g.fcnt + 2;

	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	if (!check_bytes_per_frame(bytes_per_frame))
		return DIM_ERR_BAD_CONFIG;

	ch->dbr_size = bytes_per_frame << bd_factor;
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	sync_init(ch, ch_address / 2, bytes_per_frame);

	dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
	dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
			       ch->dbr_addr, ch->dbr_size, 0, true);

	return DIM_NO_ERROR;
}
u8 dim_destroy_channel(struct dim_channel *ch)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (ch->addr == g.atx_dbr.ch_addr) {
		dimcb_io_write(&g.dim2->MIEN, 0);
		g.atx_dbr.ch_addr = 0;
	}

	dim2_clear_channel(ch->addr);
	if (ch->dbr_addr < DBR_SIZE)
		free_dbr(ch->dbr_addr, ch->dbr_size);
	ch->dbr_addr = DBR_SIZE;

	return DIM_NO_ERROR;
}
void dim_service_ahb_int_irq(struct dim_channel *const *channels)
{
	bool state_changed;

	if (!g.dim_is_initialized) {
		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
			     "DIM is not initialized");
		return;
	}

	if (!channels) {
		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
		return;
	}

	/*
	 * Use a while-loop and a flag to make sure the age is changed back at
	 * least once; otherwise the interrupt may never come if the CPU
	 * generates an interrupt only on a change of age.
	 * This cycle runs no more than the number of channels, because
	 * channel_service_interrupt() doesn't start the channel again.
	 */
	do {
		struct dim_channel *const *ch = channels;

		state_changed = false;

		while (*ch) {
			state_changed |= channel_service_interrupt(*ch);
			++ch;
		}
	} while (state_changed);
}
u8 dim_service_channel(struct dim_channel *ch)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	return channel_service(ch);
}
struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
					     struct dim_ch_state_t *state_ptr)
{
	if (!ch || !state_ptr)
		return NULL;

	state_ptr->ready = ch->state.level < 2;
	state_ptr->done_buffers = ch->done_sw_buffers_number;

	return state_ptr;
}
bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
			u16 buffer_size)
{
	if (!ch)
		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
				    "Bad channel");

	return channel_start(ch, buffer_addr, buffer_size);
}
bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
	if (!ch)
		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
				    "Bad channel");

	return channel_detach_buffers(ch, buffers_number);
}