1 /* SPDX-License-Identifier: BSD-3-Clause */
3 * Copyright (c) 2020, MIPI Alliance, Inc.
5 * Author: Nicolas Pitre <npitre@baylibre.com>
/*
 * Handy logging macro to save on line length: prefixes the message with
 * the current function name and appends a newline, emitted via pr_devel()
 * (so it compiles away unless DEBUG is defined).
 */
#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
/*
 * 32-bit word aware bit-range mask helpers.
 *
 * WN_MASK(h, l) builds a mask for absolute bit positions h..l of a field
 * that lives in word N of an array of 32-bit words, by rebasing both
 * positions onto that word. The offset is spelled "32 * N" so the word
 * index is visible at a glance and all four definitions stay aligned.
 */
#define W0_MASK(h, l) GENMASK((h) - 32 * 0, (l) - 32 * 0)
#define W1_MASK(h, l) GENMASK((h) - 32 * 1, (l) - 32 * 1)
#define W2_MASK(h, l) GENMASK((h) - 32 * 2, (l) - 32 * 2)
#define W3_MASK(h, l) GENMASK((h) - 32 * 3, (l) - 32 * 3)
/*
 * Single-bit equivalents of the WN_MASK() helpers: rebase an absolute bit
 * position onto word N of a 32-bit word array. The trailing underscore
 * keeps the macro names the same width as WN_MASK for tidy columns.
 */
#define W0_BIT_(x) BIT((x) - 32 * 0)
#define W1_BIT_(x) BIT((x) - 32 * 1)
#define W2_BIT_(x) BIT((x) - 32 * 2)
#define W3_BIT_(x) BIT((x) - 32 * 3)
/* Our main structure */
	struct i3c_master_controller master;	/* embedded I3C core master instance */
	/* MMIO mappings for the controller's register regions */
	void __iomem *base_regs;	/* main HCI register block */
	void __iomem *DAT_regs;		/* Device Address Table registers */
	void __iomem *DCT_regs;		/* Device Characteristic Table registers */
	void __iomem *RHS_regs;		/* NOTE(review): presumably Ring Headers section — confirm vs spec */
	void __iomem *PIO_regs;		/* PIO mode access registers */
	void __iomem *EXTCAPS_regs;	/* extended capability structures */
	void __iomem *AUTOCMD_regs;	/* auto-command registers */
	void __iomem *DEBUG_regs;	/* debug registers */
	const struct hci_io_ops *io;	/* selected I/O backend (PIO or DMA ops) */
	const struct hci_cmd_ops *cmd;	/* command descriptor format ops */
	atomic_t next_cmd_tid;		/* monotonically allocated command transaction ID */
	/* DAT/DCT geometry discovered at probe time */
	unsigned int DAT_entries;	/* number of Device Address Table entries */
	unsigned int DAT_entry_size;	/* size in bytes of one DAT entry */
	unsigned int DCT_entries;	/* number of Device Characteristic Table entries */
	unsigned int DCT_entry_size;	/* size in bytes of one DCT entry */
	u32 vendor_version_id;		/* vendor-provided version identifier */
	u32 vendor_product_id;		/* vendor-provided product identifier */
 * Structure to represent a master initiated transfer.
 * The rnw, data and data_len fields must be initialized before calling any
 * hci->cmd->*() method. The cmd method will initialize cmd_desc[] and
 * possibly modify (clear) the data field. Then xfer->cmd_desc[0] can
 * be augmented with CMD_0_ROC and/or CMD_0_TOC.
 * The completion field needs to be initialized before queueing with
 * hci->io->queue_xfer(), and requires CMD_0_ROC to be set.
	unsigned int data_len;		/* length in bytes of the data buffer */
	struct completion *completion;	/* signaled when the response arrives (CMD_0_ROC set) */
	/* fields below look private to the I/O backend — verify against pio/dma code */
	struct hci_xfer *next_xfer;	/* next xfer in the backend's pending list */
	struct hci_xfer *next_data;	/* next xfer awaiting data-phase service */
	struct hci_xfer *next_resp;	/* next xfer awaiting its response */
	unsigned int data_left;		/* bytes still to be transferred */
	u32 data_word_before_partial;	/* saved word preceding a partial (sub-word) transfer */
/*
 * Allocate a zero-initialized array of @n transfer descriptors.
 * Returns NULL on allocation failure; may sleep (GFP_KERNEL).
 */
static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
	return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);

/* Free an array of @n transfer descriptors obtained from hci_alloc_xfer() */
static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
/* This abstracts PIO vs DMA operations */
	/* handle controller interrupts flagged in @mask; returns true if any were serviced */
	bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask);
	/* submit @n linked xfers for execution; returns 0 or a negative errno */
	int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	/* try to cancel @n queued xfers; returns true if removed before execution */
	bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	/* set up In-Band Interrupt delivery for @dev per @req */
	int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			   const struct i3c_ibi_setup *req);
	/* tear down IBI resources allocated by request_ibi() */
	void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
	/* return a consumed IBI slot to the backend's free pool */
	void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
				struct i3c_ibi_slot *slot);
	/* one-time backend initialization at probe */
	int (*init)(struct i3c_hci *hci);
	/* release backend resources at remove */
	void (*cleanup)(struct i3c_hci *hci);
/* The two available I/O backends implementing struct hci_io_ops */
extern const struct hci_io_ops mipi_i3c_hci_pio;	/* programmed-I/O backend */
extern const struct hci_io_ops mipi_i3c_hci_dma;	/* DMA ring backend */
/* Our per device master private data */
struct i3c_hci_dev_data {

/* Controller quirk flags (bitmask) */
#define HCI_QUIRK_RAW_CCC	BIT(1)	/* CCC framing must be explicit */
/* global functions */
/* resume controller operation — presumably after suspend/error; confirm in core.c */
void mipi_i3c_hci_resume(struct i3c_hci *hci);
/* reset the PIO engine's queues/state */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
/* reset the Device Characteristic Table read index */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);