// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * I3C HCI v2.0 Command Descriptor Handling
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The code here will change.
 */

#include <linux/bitfield.h>
#include <linux/i3c/master.h>

#include "hci.h"
#include "cmd.h"
#include "xfer_mode_rate.h"


/*
 * Unified Data Transfer Command
 */

#define CMD_0_ATTR_U                    FIELD_PREP(CMD_0_ATTR, 0x4)

#define CMD_U3_HDR_TSP_ML_CTRL(v)       FIELD_PREP(W3_MASK(107, 104), v)
#define CMD_U3_IDB4(v)                  FIELD_PREP(W3_MASK(103,  96), v)
#define CMD_U3_HDR_CMD(v)               FIELD_PREP(W3_MASK(103,  96), v)
#define CMD_U2_IDB3(v)                  FIELD_PREP(W2_MASK( 95,  88), v)
#define CMD_U2_HDR_BT(v)                FIELD_PREP(W2_MASK( 95,  88), v)
#define CMD_U2_IDB2(v)                  FIELD_PREP(W2_MASK( 87,  80), v)
#define CMD_U2_BT_CMD2(v)               FIELD_PREP(W2_MASK( 87,  80), v)
#define CMD_U2_IDB1(v)                  FIELD_PREP(W2_MASK( 79,  72), v)
#define CMD_U2_BT_CMD1(v)               FIELD_PREP(W2_MASK( 79,  72), v)
#define CMD_U2_IDB0(v)                  FIELD_PREP(W2_MASK( 71,  64), v)
#define CMD_U2_BT_CMD0(v)               FIELD_PREP(W2_MASK( 71,  64), v)
#define CMD_U1_ERR_HANDLING(v)          FIELD_PREP(W1_MASK( 63,  62), v)
#define CMD_U1_ADD_FUNC(v)              FIELD_PREP(W1_MASK( 61,  56), v)
#define CMD_U1_COMBO_XFER                          W1_BIT_( 55)
#define CMD_U1_DATA_LENGTH(v)           FIELD_PREP(W1_MASK( 53,  32), v)
#define CMD_U0_TOC                                 W0_BIT_( 31)
#define CMD_U0_ROC                                 W0_BIT_( 30)
#define CMD_U0_MAY_YIELD                           W0_BIT_( 29)
#define CMD_U0_NACK_RCNT(v)             FIELD_PREP(W0_MASK( 28,  27), v)
#define CMD_U0_IDB_COUNT(v)             FIELD_PREP(W0_MASK( 26,  24), v)
#define CMD_U0_MODE_INDEX(v)            FIELD_PREP(W0_MASK( 22,  18), v)
#define CMD_U0_XFER_RATE(v)             FIELD_PREP(W0_MASK( 17,  15), v)
#define CMD_U0_DEV_ADDRESS(v)           FIELD_PREP(W0_MASK( 14,   8), v)
#define CMD_U0_RnW                                 W0_BIT_(  7)
#define CMD_U0_TID(v)                   FIELD_PREP(W0_MASK(  6,   3), v)

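/*
 * A command descriptor is four 32-bit words (cmd_desc[0..3]); the macros
 * above name each field by its absolute bit position within the 128-bit
 * descriptor, mapped onto the corresponding word by the W0_/W1_/W2_/W3_
 * helpers. Short writes can carry their payload directly in the descriptor
 * as immediate data bytes (IDB0..IDB4) instead of pointing to a separate
 * data buffer.
 *
 * For illustration (see hci_cmd_v2_prep_private_xfer() below), a 2-byte
 * private write ends up with CMD_U0_IDB_COUNT(2) in cmd_desc[0], the two
 * payload bytes in cmd_desc[2] via CMD_U2_IDB0()/CMD_U2_IDB1(), and
 * CMD_U1_DATA_LENGTH(0) since no external buffer is needed.
 */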
/*
 * Address Assignment Command
 */

#define CMD_0_ATTR_A                    FIELD_PREP(CMD_0_ATTR, 0x2)

#define CMD_A1_DATA_LENGTH(v)           FIELD_PREP(W1_MASK( 53,  32), v)
#define CMD_A0_TOC                                 W0_BIT_( 31)
#define CMD_A0_ROC                                 W0_BIT_( 30)
#define CMD_A0_XFER_RATE(v)             FIELD_PREP(W0_MASK( 17,  15), v)
#define CMD_A0_ASSIGN_ADDRESS(v)        FIELD_PREP(W0_MASK( 14,   8), v)
#define CMD_A0_TID(v)                   FIELD_PREP(W0_MASK(  6,   3), v)


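/*
 * Map the configured I3C SCL rate to one of the XFERRATE_I3C_SDRx indices
 * from xfer_mode_rate.h; rates of 2 MHz or less fall back to the FM/FM+
 * compatible entry.
 */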
static unsigned int get_i3c_rate_idx(struct i3c_hci *hci)
{
        struct i3c_bus *bus = i3c_master_get_bus(&hci->master);

        if (bus->scl_rate.i3c >= 12000000)
                return XFERRATE_I3C_SDR0;
        if (bus->scl_rate.i3c > 8000000)
                return XFERRATE_I3C_SDR1;
        if (bus->scl_rate.i3c > 6000000)
                return XFERRATE_I3C_SDR2;
        if (bus->scl_rate.i3c > 4000000)
                return XFERRATE_I3C_SDR3;
        if (bus->scl_rate.i3c > 2000000)
                return XFERRATE_I3C_SDR4;
        return XFERRATE_I3C_SDR_FM_FMP;
}

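/*
 * Map the configured I2C SCL rate to the Fast Mode Plus index when it is
 * 1 MHz or more, and to the Fast Mode index otherwise.
 */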
static unsigned int get_i2c_rate_idx(struct i3c_hci *hci)
{
        struct i3c_bus *bus = i3c_master_get_bus(&hci->master);

        if (bus->scl_rate.i2c >= 1000000)
                return XFERRATE_I2C_FMP;
        return XFERRATE_I2C_FM;
}

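/*
 * Fill in a Unified Data Transfer descriptor for a private transfer.
 * Writes of up to 5 bytes become immediate data transfers: the payload is
 * packed into the IDB0..IDB4 fields and xfer->data is cleared so the IO
 * backend won't look for a separate data buffer. Reads and longer writes
 * use a regular transfer with the length in CMD_U1_DATA_LENGTH().
 */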
static void hci_cmd_v2_prep_private_xfer(struct i3c_hci *hci,
                                         struct hci_xfer *xfer,
                                         u8 addr, unsigned int mode,
                                         unsigned int rate)
{
        u8 *data = xfer->data;
        unsigned int data_len = xfer->data_len;
        bool rnw = xfer->rnw;

        xfer->cmd_tid = hci_get_tid();

        if (!rnw && data_len <= 5) {
                xfer->cmd_desc[0] =
                        CMD_0_ATTR_U |
                        CMD_U0_TID(xfer->cmd_tid) |
                        CMD_U0_DEV_ADDRESS(addr) |
                        CMD_U0_XFER_RATE(rate) |
                        CMD_U0_MODE_INDEX(mode) |
                        CMD_U0_IDB_COUNT(data_len);
                xfer->cmd_desc[1] =
                        CMD_U1_DATA_LENGTH(0);
                xfer->cmd_desc[2] = 0;
                xfer->cmd_desc[3] = 0;
                switch (data_len) {
                case 5:
                        xfer->cmd_desc[3] |= CMD_U3_IDB4(data[4]);
                        fallthrough;
                case 4:
                        xfer->cmd_desc[2] |= CMD_U2_IDB3(data[3]);
                        fallthrough;
                case 3:
                        xfer->cmd_desc[2] |= CMD_U2_IDB2(data[2]);
                        fallthrough;
                case 2:
                        xfer->cmd_desc[2] |= CMD_U2_IDB1(data[1]);
                        fallthrough;
                case 1:
                        xfer->cmd_desc[2] |= CMD_U2_IDB0(data[0]);
                        fallthrough;
                case 0:
                        break;
                }
                /* we consumed all the data with the cmd descriptor */
                xfer->data = NULL;
        } else {
                xfer->cmd_desc[0] =
                        CMD_0_ATTR_U |
                        CMD_U0_TID(xfer->cmd_tid) |
                        (rnw ? CMD_U0_RnW : 0) |
                        CMD_U0_DEV_ADDRESS(addr) |
                        CMD_U0_XFER_RATE(rate) |
                        CMD_U0_MODE_INDEX(mode);
                xfer->cmd_desc[1] =
                        CMD_U1_DATA_LENGTH(data_len);
                xfer->cmd_desc[2] = 0;
                xfer->cmd_desc[3] = 0;
        }
}

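/*
 * Build the descriptor for a CCC transfer. A raw CCC directed at a
 * specific (non-broadcast) address is simply sent as a private transfer.
 * Otherwise the CCC opcode goes into IDB0 and, for writes of up to
 * 4 bytes, the payload is packed into the remaining immediate data bytes;
 * reads and longer payloads keep their data in xfer->data.
 */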
static int hci_cmd_v2_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
                               u8 ccc_addr, u8 ccc_cmd, bool raw)
{
        unsigned int mode = XFERMODE_IDX_I3C_SDR;
        unsigned int rate = get_i3c_rate_idx(hci);
        u8 *data = xfer->data;
        unsigned int data_len = xfer->data_len;
        bool rnw = xfer->rnw;

        if (raw && ccc_addr != I3C_BROADCAST_ADDR) {
                hci_cmd_v2_prep_private_xfer(hci, xfer, ccc_addr, mode, rate);
                return 0;
        }

        xfer->cmd_tid = hci_get_tid();

        if (!rnw && data_len <= 4) {
                xfer->cmd_desc[0] =
                        CMD_0_ATTR_U |
                        CMD_U0_TID(xfer->cmd_tid) |
                        CMD_U0_DEV_ADDRESS(ccc_addr) |
                        CMD_U0_XFER_RATE(rate) |
                        CMD_U0_MODE_INDEX(mode) |
                        CMD_U0_IDB_COUNT(data_len + (!raw ? 0 : 1));
                xfer->cmd_desc[1] =
                        CMD_U1_DATA_LENGTH(0);
                xfer->cmd_desc[2] =
                        CMD_U2_IDB0(ccc_cmd);
                xfer->cmd_desc[3] = 0;
                switch (data_len) {
                case 4:
                        xfer->cmd_desc[3] |= CMD_U3_IDB4(data[3]);
                        fallthrough;
                case 3:
                        xfer->cmd_desc[2] |= CMD_U2_IDB3(data[2]);
                        fallthrough;
                case 2:
                        xfer->cmd_desc[2] |= CMD_U2_IDB2(data[1]);
                        fallthrough;
                case 1:
                        xfer->cmd_desc[2] |= CMD_U2_IDB1(data[0]);
                        fallthrough;
                case 0:
                        break;
                }
                /* we consumed all the data with the cmd descriptor */
                xfer->data = NULL;
        } else {
                xfer->cmd_desc[0] =
                        CMD_0_ATTR_U |
                        CMD_U0_TID(xfer->cmd_tid) |
                        (rnw ? CMD_U0_RnW : 0) |
                        CMD_U0_DEV_ADDRESS(ccc_addr) |
                        CMD_U0_XFER_RATE(rate) |
                        CMD_U0_MODE_INDEX(mode) |
                        CMD_U0_IDB_COUNT(!raw ? 0 : 1);
                xfer->cmd_desc[1] =
                        CMD_U1_DATA_LENGTH(data_len);
                xfer->cmd_desc[2] =
                        CMD_U2_IDB0(ccc_cmd);
                xfer->cmd_desc[3] = 0;
        }

        return 0;
}

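/* Prepare a private SDR transfer to an I3C device at its dynamic address. */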
static void hci_cmd_v2_prep_i3c_xfer(struct i3c_hci *hci,
                                     struct i3c_dev_desc *dev,
                                     struct hci_xfer *xfer)
{
        unsigned int mode = XFERMODE_IDX_I3C_SDR;
        unsigned int rate = get_i3c_rate_idx(hci);
        u8 addr = dev->info.dyn_addr;

        hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
}

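/* Prepare a transfer to a legacy I2C device using the I2C mode and rate. */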
static void hci_cmd_v2_prep_i2c_xfer(struct i3c_hci *hci,
                                     struct i2c_dev_desc *dev,
                                     struct hci_xfer *xfer)
{
        unsigned int mode = XFERMODE_IDX_I2C;
        unsigned int rate = get_i2c_rate_idx(hci);
        u8 addr = dev->addr;

        hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
}

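/*
 * Perform Dynamic Address Assignment with pairs of Address Assignment
 * commands, registering each discovered device with the I3C core.
 * Returns 0 once no more devices respond, or a negative error code
 * otherwise.
 */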
static int hci_cmd_v2_daa(struct i3c_hci *hci)
{
        struct hci_xfer *xfer;
        int ret;
        u8 next_addr = 0;
        u32 device_id[2];
        u64 pid;
        unsigned int dcr, bcr;
        DECLARE_COMPLETION_ONSTACK(done);

        xfer = hci_alloc_xfer(2);
        if (!xfer)
                return -ENOMEM;

        xfer[0].data = &device_id;
        xfer[0].data_len = 8;
        xfer[0].rnw = true;
        xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
        xfer[1].completion = &done;

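        /*
         * Each iteration chains two commands: xfer[0] reads back the 64-bit
         * PID plus BCR/DCR of the next responding device, and xfer[1]
         * assigns it the next free dynamic address.
         */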
        for (;;) {
                ret = i3c_master_get_free_addr(&hci->master, next_addr);
                if (ret < 0)
                        break;
                next_addr = ret;
                DBG("next_addr = 0x%02x", next_addr);
                xfer[0].cmd_tid = hci_get_tid();
                xfer[0].cmd_desc[0] =
                        CMD_0_ATTR_A |
                        CMD_A0_TID(xfer[0].cmd_tid) |
                        CMD_A0_ROC;
                xfer[1].cmd_tid = hci_get_tid();
                xfer[1].cmd_desc[0] =
                        CMD_0_ATTR_A |
                        CMD_A0_TID(xfer[1].cmd_tid) |
                        CMD_A0_ASSIGN_ADDRESS(next_addr) |
                        CMD_A0_ROC |
                        CMD_A0_TOC;
                hci->io->queue_xfer(hci, xfer, 2);
                if (!wait_for_completion_timeout(&done, HZ) &&
                    hci->io->dequeue_xfer(hci, xfer, 2)) {
                        ret = -ETIME;
                        break;
                }
                if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
                        ret = 0;  /* no more devices to be assigned */
                        break;
                }
                if (RESP_STATUS(xfer[1].response) != RESP_SUCCESS) {
                        ret = -EIO;
                        break;
                }

                pid = FIELD_GET(W1_MASK(47, 32), device_id[1]);
                pid = (pid << 32) | device_id[0];
                bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
                dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
                DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
                    next_addr, pid, dcr, bcr);
                /*
                 * TODO: Extend the subsystem layer to allow for registering
                 * a new device and providing BCR/DCR/PID at the same time.
                 */
                ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
                if (ret)
                        break;
        }

        hci_free_xfer(xfer, 2);
        return ret;
}

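/* v2.0 command handling operations used by the rest of the HCI driver. */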
const struct hci_cmd_ops mipi_i3c_hci_cmd_v2 = {
        .prep_ccc               = hci_cmd_v2_prep_ccc,
        .prep_i3c_xfer          = hci_cmd_v2_prep_i3c_xfer,
        .prep_i2c_xfer          = hci_cmd_v2_prep_i2c_xfer,
        .perform_daa            = hci_cmd_v2_daa,
};