// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra XUSB device mode controller
 *
 * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2015, Google Inc.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
#include <linux/usb/phy.h>
#include <linux/workqueue.h>

/* XUSB_DEV registers */
#define DB 0x004
#define DB_TARGET_MASK GENMASK(15, 8)
#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
#define DB_STREAMID_MASK GENMASK(31, 16)
#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
#define ERSTSZ 0x008
#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
#define ERSTXBALO(x) (0x010 + 8 * (x))
#define ERSTXBAHI(x) (0x014 + 8 * (x))
#define ERDPLO 0x020
#define ERDPLO_EHB BIT(3)
#define ERDPHI 0x024
#define EREPLO 0x028
#define EREPLO_ECS BIT(0)
#define EREPLO_SEGI BIT(1)
#define EREPHI 0x02c
#define CTRL 0x030
#define CTRL_RUN BIT(0)
#define CTRL_LSE BIT(1)
#define CTRL_IE BIT(4)
#define CTRL_SMI_EVT BIT(5)
#define CTRL_SMI_DSE BIT(6)
#define CTRL_EWE BIT(7)
#define CTRL_DEVADDR_MASK GENMASK(30, 24)
#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
#define CTRL_ENABLE BIT(31)
#define ST 0x034
#define ST_RC BIT(0)
#define ST_IP BIT(4)
#define RT_IMOD 0x038
#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
#define PORTSC 0x03c
#define PORTSC_CCS BIT(0)
#define PORTSC_PED BIT(1)
#define PORTSC_PR BIT(4)
#define PORTSC_PLS_SHIFT 5
#define PORTSC_PLS_MASK GENMASK(8, 5)
#define PORTSC_PLS_U0 0x0
#define PORTSC_PLS_U2 0x2
#define PORTSC_PLS_U3 0x3
#define PORTSC_PLS_DISABLED 0x4
#define PORTSC_PLS_RXDETECT 0x5
#define PORTSC_PLS_INACTIVE 0x6
#define PORTSC_PLS_RESUME 0xf
#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
#define PORTSC_PS_SHIFT 10
#define PORTSC_PS_MASK GENMASK(13, 10)
#define PORTSC_PS_UNDEFINED 0x0
#define PORTSC_PS_FS 0x1
#define PORTSC_PS_LS 0x2
#define PORTSC_PS_HS 0x3
#define PORTSC_PS_SS 0x4
#define PORTSC_LWS BIT(16)
#define PORTSC_CSC BIT(17)
#define PORTSC_WRC BIT(19)
#define PORTSC_PRC BIT(21)
#define PORTSC_PLC BIT(22)
#define PORTSC_CEC BIT(23)
#define PORTSC_WPR BIT(30)
#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
			    PORTSC_PLC | PORTSC_CEC)
#define ECPLO 0x040
#define ECPHI 0x044
#define MFINDEX 0x048
#define MFINDEX_FRAME_SHIFT 3
#define MFINDEX_FRAME_MASK GENMASK(13, 3)
#define PORTPM 0x04c
#define PORTPM_L1S_MASK GENMASK(1, 0)
#define PORTPM_L1S_DROP 0x0
#define PORTPM_L1S_ACCEPT 0x1
#define PORTPM_L1S_NYET 0x2
#define PORTPM_L1S_STALL 0x3
#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
#define PORTPM_RWE BIT(3)
#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
#define PORTPM_FLA BIT(24)
#define PORTPM_VBA BIT(25)
#define PORTPM_WOC BIT(26)
#define PORTPM_WOD BIT(27)
#define PORTPM_U1E BIT(28)
#define PORTPM_U2E BIT(29)
#define PORTPM_FRWE BIT(30)
#define PORTPM_PNG_CYA BIT(31)
#define EP_HALT 0x050
#define EP_PAUSE 0x054
#define EP_RELOAD 0x058
#define EP_STCHG 0x05c
#define DEVNOTIF_LO 0x064
#define DEVNOTIF_LO_TRIG BIT(0)
#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
#define DEVNOTIF_HI 0x068
#define PORTHALT 0x06c
#define PORTHALT_HALT_LTSSM BIT(0)
#define PORTHALT_HALT_REJECT BIT(1)
#define PORTHALT_STCHG_REQ BIT(20)
#define PORTHALT_STCHG_INTR_EN BIT(24)
#define PORT_TM 0x070
#define EP_THREAD_ACTIVE 0x074
#define EP_STOPPED 0x078
#define HSFSPI_COUNT0 0x100
#define HSFSPI_COUNT13 0x134
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
				HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)

#define SSPX_CORE_CNT0 0x610
#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
#define SSPX_CORE_CNT30 0x688
#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
				SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
#define SSPX_CORE_CNT32 0x690
#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
				SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
#define SSPX_CORE_CNT56 0x6fc
#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
				SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
#define SSPX_CORE_CNT57 0x700
#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
				SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
#define SSPX_CORE_CNT65 0x720
#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
				SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
#define SSPX_CORE_CNT66 0x724
#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
				SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
#define SSPX_CORE_CNT67 0x728
#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
				SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
#define SSPX_CORE_CNT72 0x73c
#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
				SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
				SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
#define BLCG_DFPCI BIT(0)
#define BLCG_UFPCI BIT(1)
#define BLCG_FE BIT(2)
#define BLCG_COREPLL_PWRDN BIT(8)
#define BLCG_IOPLL_0_PWRDN BIT(9)
#define BLCG_IOPLL_1_PWRDN BIT(10)
#define BLCG_IOPLL_2_PWRDN BIT(11)
#define BLCG_ALL 0x1ff
#define CFG_DEV_SSPI_XFER 0x858
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
				CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
#define CFG_DEV_FE 0x85c
#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)

/* FPCI registers */
#define XUSB_DEV_CFG_1 0x004
#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
#define XUSB_DEV_CFG_4 0x010
#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
#define XUSB_DEV_CFG_5 0x014

/* IPFS registers */
#define XUSB_DEV_CONFIGURATION_0 0x180
#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
#define XUSB_DEV_INTR_MASK_0 0x188
#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)

struct tegra_xudc_ep_context {
	__le32 info0;
	__le32 info1;
	__le32 deq_lo;
	__le32 deq_hi;
	__le32 tx_info;
	__le32 rsvd[11];
};

#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4

#define EP_TYPE_INVALID 0
#define EP_TYPE_ISOCH_OUT 1
#define EP_TYPE_BULK_OUT 2
#define EP_TYPE_INTERRUPT_OUT 3
#define EP_TYPE_CONTROL 4
#define EP_TYPE_ISOCH_IN 5
#define EP_TYPE_BULK_IN 6
#define EP_TYPE_INTERRUPT_IN 7
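
/*
 * Generate ep_ctx_read_<name>() and ep_ctx_write_<name>() accessors for a
 * bit field of an endpoint context word. The context lives in DMA'able
 * memory in little-endian format, so every access converts through
 * le32_to_cpu()/cpu_to_le32().
 */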
#define BUILD_EP_CONTEXT_RW(name, member, shift, mask)			\
static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
{									\
	return (le32_to_cpu(ctx->member) >> (shift)) & (mask);		\
}									\
static inline void							\
ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val)		\
{									\
	u32 tmp;							\
									\
	tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift));		\
	tmp |= (val & (mask)) << (shift);				\
	ctx->member = cpu_to_le32(tmp);					\
}

BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1)
BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
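
/*
 * The hardware dequeue pointer must be 16-byte aligned, so deq_lo stores
 * address bits 31:4 shifted down by 4 (alongside the DCS bit at bit 0).
 * These helpers reassemble and split the full 64-bit pointer.
 */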
static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
{
	return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
		(ep_ctx_read_deq_lo(ctx) << 4);
}

static inline void
ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
{
	ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
	ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
}

struct tegra_xudc_trb {
	__le32 data_lo;
	__le32 data_hi;
	__le32 status;
	__le32 control;
};

#define TRB_TYPE_RSVD 0
#define TRB_TYPE_NORMAL 1
#define TRB_TYPE_SETUP_STAGE 2
#define TRB_TYPE_DATA_STAGE 3
#define TRB_TYPE_STATUS_STAGE 4
#define TRB_TYPE_ISOCH 5
#define TRB_TYPE_LINK 6
#define TRB_TYPE_TRANSFER_EVENT 32
#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
#define TRB_TYPE_STREAM 48
#define TRB_TYPE_SETUP_PACKET_EVENT 63

#define TRB_CMPL_CODE_INVALID 0
#define TRB_CMPL_CODE_SUCCESS 1
#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
#define TRB_CMPL_CODE_USB_TRANS_ERR 4
#define TRB_CMPL_CODE_TRB_ERR 5
#define TRB_CMPL_CODE_STALL 6
#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
#define TRB_CMPL_CODE_SHORT_PACKET 13
#define TRB_CMPL_CODE_RING_UNDERRUN 14
#define TRB_CMPL_CODE_RING_OVERRUN 15
#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
#define TRB_CMPL_CODE_STOPPED 26
#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
#define TRB_CMPL_CODE_HOST_REJECTED 221
#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
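
/*
 * Completion codes up to 31 follow the standard xHCI definitions; the
 * codes from 219 onwards fall in the xHCI vendor-defined range and are
 * specific to the XUSB device controller.
 */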

#define BUILD_TRB_RW(name, member, shift, mask)				\
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb)		\
{									\
	return (le32_to_cpu(trb->member) >> (shift)) & (mask);		\
}									\
static inline void							\
trb_write_##name(struct tegra_xudc_trb *trb, u32 val)			\
{									\
	u32 tmp;							\
									\
	tmp = le32_to_cpu(trb->member) & ~((mask) << (shift));		\
	tmp |= (val & (mask)) << (shift);				\
	trb->member = cpu_to_le32(tmp);					\
}

BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
BUILD_TRB_RW(seq_num, status, 0, 0xffff)
BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
BUILD_TRB_RW(td_size, status, 17, 0x1f)
BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
BUILD_TRB_RW(cycle, control, 0, 0x1)
BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
BUILD_TRB_RW(isp, control, 2, 0x1)
BUILD_TRB_RW(chain, control, 4, 0x1)
BUILD_TRB_RW(ioc, control, 5, 0x1)
BUILD_TRB_RW(type, control, 10, 0x3f)
BUILD_TRB_RW(stream_id, control, 16, 0xffff)
BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
BUILD_TRB_RW(tlbpc, control, 16, 0xf)
BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
BUILD_TRB_RW(sia, control, 31, 0x1)
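
/*
 * Note that several control-word fields (stream_id, endpoint_id, tlbpc and
 * data_stage_dir) all start at bit 16: which accessor is meaningful depends
 * on the type of TRB being built. Likewise seq_num and transfer_len overlap
 * in the status word.
 */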

static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
{
	return ((u64)trb_read_data_hi(trb) << 32) |
		trb_read_data_lo(trb);
}

static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
{
	trb_write_data_lo(trb, lower_32_bits(addr));
	trb_write_data_hi(trb, upper_32_bits(addr));
}

struct tegra_xudc_request {
	struct usb_request usb_req;

	size_t buf_queued;
	unsigned int trbs_queued;
	unsigned int trbs_needed;
	bool need_zlp;

	struct tegra_xudc_trb *first_trb;
	struct tegra_xudc_trb *last_trb;

	struct list_head list;
};

struct tegra_xudc_ep {
	struct tegra_xudc *xudc;
	struct usb_ep usb_ep;
	unsigned int index;

	struct tegra_xudc_ep_context *context;

#define XUDC_TRANSFER_RING_SIZE 64
	struct tegra_xudc_trb *transfer_ring;
	dma_addr_t transfer_ring_phys;

	unsigned int enq_ptr;
	unsigned int deq_ptr;
	bool pcs;
	bool ring_full;
	bool stream_rejected;

	struct list_head queue;
	const struct usb_endpoint_descriptor *desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
};

struct tegra_xudc_sel_timing {
	__u8 u1sel;
	__u8 u1pel;
	__le16 u2sel;
	__le16 u2pel;
};

enum tegra_xudc_setup_state {
	WAIT_FOR_SETUP,
	DATA_STAGE_XFER,
	DATA_STAGE_RECV,
	STATUS_STAGE_XFER,
	STATUS_STAGE_RECV,
};

struct tegra_xudc_setup_packet {
	struct usb_ctrlrequest ctrl_req;
	unsigned int seq_num;
};

struct tegra_xudc_save_regs {
	u32 ctrl;
	u32 portpm;
};

struct tegra_xudc {
	struct device *dev;
	const struct tegra_xudc_soc *soc;
	struct tegra_xusb_padctl *padctl;

	spinlock_t lock;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

#define XUDC_NR_EVENT_RINGS 2
#define XUDC_EVENT_RING_SIZE 4096
	struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
	dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
	unsigned int event_ring_index;
	unsigned int event_ring_deq_ptr;

#define XUDC_NR_EPS 32
	struct tegra_xudc_ep ep[XUDC_NR_EPS];
	struct tegra_xudc_ep_context *ep_context;
	dma_addr_t ep_context_phys;

	struct device *genpd_dev_device;
	struct device *genpd_dev_ss;
	struct device_link *genpd_dl_device;
	struct device_link *genpd_dl_ss;

	struct dma_pool *transfer_ring_pool;

	bool queued_setup_packet;
	struct tegra_xudc_setup_packet setup_packet;
	enum tegra_xudc_setup_state setup_state;
	u16 setup_seq_num;

	u16 dev_addr;
	u16 isoch_delay;
	struct tegra_xudc_sel_timing sel_timing;
	u8 test_mode_pattern;
	__le16 status_buf;
	struct tegra_xudc_request *ep0_req;

	bool pullup;

	unsigned int nr_enabled_eps;
	unsigned int nr_isoch_eps;

	unsigned int device_state;
	unsigned int resume_state;

	int irq;

	void __iomem *base;
	resource_size_t phys_base;
	void __iomem *ipfs;
	void __iomem *fpci;

	struct regulator_bulk_data *supplies;

	struct clk_bulk_data *clks;

	bool device_mode;
	struct work_struct usb_role_sw_work;

	struct phy **usb3_phy;
	struct phy *curr_usb3_phy;
	struct phy **utmi_phy;
	struct phy *curr_utmi_phy;

	struct tegra_xudc_save_regs saved_regs;
	bool suspended;
	bool powergated;

	struct usb_phy **usbphy;
	struct usb_phy *curr_usbphy;
	struct notifier_block vbus_nb;

	struct completion disconnect_complete;

	bool wait_csc;

#define TOGGLE_VBUS_WAIT_MS 100
	struct delayed_work plc_reset_work;

	struct delayed_work port_reset_war_work;
	bool wait_for_sec_prc;
};

#define XUDC_TRB_MAX_BUFFER_SIZE 65536
#define XUDC_MAX_ISOCH_EPS 4
#define XUDC_INTERRUPT_MODERATION_US 0

static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
};

struct tegra_xudc_soc {
	const char * const *supply_names;
	unsigned int num_supplies;
	const char * const *clock_names;
	unsigned int num_clks;
	unsigned int num_phys;
	bool u1_enable;
	bool u2_enable;
	bool invalid_seq_num;
	bool pls_quirk;
	bool port_reset_quirk;
	bool port_speed_quirk;
};

static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->fpci + offset);
}

static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->fpci + offset);
}

static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->ipfs + offset);
}

static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->ipfs + offset);
}

static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->base + offset);
}

static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->base + offset);
}

static inline int xudc_readl_poll(struct tegra_xudc *xudc,
				  unsigned int offset, u32 mask, u32 val)
{
	u32 regval;

	return readl_poll_timeout_atomic(xudc->base + offset, regval,
					 (regval & mask) == val, 1, 100);
}

static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
{
	return container_of(gadget, struct tegra_xudc, gadget);
}

static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
{
	return container_of(ep, struct tegra_xudc_ep, usb_ep);
}

static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
{
	return container_of(req, struct tegra_xudc_request, usb_req);
}

static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
			    struct tegra_xudc_trb *trb)
{
	dev_dbg(xudc->dev,
		"%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
		type, trb, trb->data_lo, trb->data_hi, trb->status,
		trb->control);
}

static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
{
	u32 val;

	/* limit port speed to Gen 1 */
	val = xudc_readl(xudc, SSPX_CORE_CNT56);
	val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
	xudc_writel(xudc, val, SSPX_CORE_CNT56);

	val = xudc_readl(xudc, SSPX_CORE_CNT57);
	val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
	xudc_writel(xudc, val, SSPX_CORE_CNT57);

	val = xudc_readl(xudc, SSPX_CORE_CNT65);
	val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT65);

	val = xudc_readl(xudc, SSPX_CORE_CNT66);
	val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT67);
	val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
	xudc_writel(xudc, val, SSPX_CORE_CNT67);

	val = xudc_readl(xudc, SSPX_CORE_CNT72);
	val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
	val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
	xudc_writel(xudc, val, SSPX_CORE_CNT72);
}

static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
{
	u32 val;

	/* restore port speed to Gen 2 */
	val = xudc_readl(xudc, SSPX_CORE_CNT56);
	val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
	xudc_writel(xudc, val, SSPX_CORE_CNT56);

	val = xudc_readl(xudc, SSPX_CORE_CNT57);
	val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
	val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
	xudc_writel(xudc, val, SSPX_CORE_CNT57);

	val = xudc_readl(xudc, SSPX_CORE_CNT65);
	val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
	xudc_writel(xudc, val, SSPX_CORE_CNT65);

	val = xudc_readl(xudc, SSPX_CORE_CNT66);
	val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
	xudc_writel(xudc, val, SSPX_CORE_CNT66);

	val = xudc_readl(xudc, SSPX_CORE_CNT67);
	val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
	val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
	xudc_writel(xudc, val, SSPX_CORE_CNT67);

	val = xudc_readl(xudc, SSPX_CORE_CNT72);
	val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
	val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
	xudc_writel(xudc, val, SSPX_CORE_CNT72);
}

static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
	int err;

	pm_runtime_get_sync(xudc->dev);

	err = phy_power_on(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "UTMI PHY power on failed: %d\n", err);

	err = phy_power_on(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err);

	dev_dbg(xudc->dev, "device mode on\n");

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
			 USB_ROLE_DEVICE);
}

static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
{
	bool connected = false;
	u32 pls, val;
	int err;

	dev_dbg(xudc->dev, "device mode off\n");

	connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);

	reinit_completion(&xudc->disconnect_complete);

	if (xudc->soc->port_speed_quirk)
		tegra_xudc_restore_port_speed(xudc);

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);

	pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
		PORTSC_PLS_SHIFT;

	/* Direct link to U0 if disconnected in RESUME or U2. */
	if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
	    (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
		val = xudc_readl(xudc, PORTPM);
		val |= PORTPM_FRWE;
		xudc_writel(xudc, val, PORTPM);

		val = xudc_readl(xudc, PORTSC);
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	/* Wait for disconnect event. */
	if (connected)
		wait_for_completion(&xudc->disconnect_complete);

	/* Make sure interrupt handler has completed before powergating. */
	synchronize_irq(xudc->irq);

	err = phy_power_off(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);

	err = phy_power_off(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err);

	pm_runtime_put(xudc->dev);
}

static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
{
	struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
					       usb_role_sw_work);

	if (xudc->device_mode)
		tegra_xudc_device_mode_on(xudc);
	else
		tegra_xudc_device_mode_off(xudc);
}

static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
				    struct usb_phy *usbphy)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
			return i;
	}

	dev_info(xudc->dev, "phy index could not be found for shared USB PHY\n");

	return -1;
}

static int tegra_xudc_vbus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
					       vbus_nb);
	struct usb_phy *usbphy = (struct usb_phy *)data;
	int phy_index;

	dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);

	if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
	    (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
		dev_dbg(xudc->dev, "Same role (%d) received. Ignore\n",
			xudc->device_mode);
		return NOTIFY_OK;
	}

	xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS);

	phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
	dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
		phy_index);

	if (!xudc->suspended && phy_index != -1) {
		xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
		xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
		xudc->curr_usbphy = usbphy;
		schedule_work(&xudc->usb_role_sw_work);
	}

	return NOTIFY_OK;
}

static void tegra_xudc_plc_reset_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
					       plc_reset_work);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->wait_csc) {
		u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;

		if (pls == PORTSC_PLS_INACTIVE) {
			dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_NONE);
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_DEVICE);

			xudc->wait_csc = false;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static void tegra_xudc_port_reset_war_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc =
		container_of(dwork, struct tegra_xudc, port_reset_war_work);
	unsigned long flags;
	u32 pls;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->device_mode && xudc->wait_for_sec_prc) {
		pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			PORTSC_PLS_SHIFT;
		dev_dbg(xudc->dev, "pls = %x\n", pls);

		if (pls == PORTSC_PLS_DISABLED) {
			dev_dbg(xudc->dev, "toggle vbus\n");
			/* PRC doesn't complete in 100ms, toggle the vbus */
			ret = tegra_phy_xusb_utmi_port_reset(
				xudc->curr_utmi_phy);
			if (ret == 1)
				xudc->wait_csc = true;
		}

		xudc->wait_for_sec_prc = 0;
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
				   struct tegra_xudc_trb *trb)
{
	unsigned long index;

	index = trb - ep->transfer_ring;

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return 0;

	return (ep->transfer_ring_phys + index * sizeof(*trb));
}

static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
					       dma_addr_t addr)
{
	struct tegra_xudc_trb *trb;
	unsigned long index;

	index = (addr - ep->transfer_ring_phys) / sizeof(*trb);

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return NULL;

	trb = &ep->transfer_ring[index];

	return trb;
}
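
/*
 * The helpers below drive the endpoint state-change handshake: ep_reload()
 * sets the endpoint's bit in EP_RELOAD and polls it until the controller
 * clears it; the pause/halt helpers flip the endpoint's bit in
 * EP_PAUSE/EP_HALT, poll EP_STCHG for the controller's acknowledgment, then
 * write the bit back to EP_STCHG to clear the change flag.
 */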
static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_writel(xudc, BIT(ep), EP_RELOAD);
	xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
}

static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);

	xudc_writel(xudc, 0, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);

	xudc_writel(xudc, 0, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
	xudc_writel(xudc, BIT(ep), EP_STOPPED);
}

static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
}

static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
				struct tegra_xudc_request *req, int status)
{
	struct tegra_xudc *xudc = ep->xudc;

	dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
		req, ep->index, status);

	if (likely(req->usb_req.status == -EINPROGRESS))
		req->usb_req.status = status;

	list_del_init(&req->list);

	if (usb_endpoint_xfer_control(ep->desc)) {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 (xudc->setup_state ==
					  DATA_STAGE_XFER));
	} else {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 usb_endpoint_dir_in(ep->desc));
	}

	spin_unlock(&xudc->lock);
	usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
	spin_lock(&xudc->lock);
}

static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
{
	struct tegra_xudc_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		tegra_xudc_req_done(ep, req, status);
	}
}
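
/*
 * The last slot of the transfer ring permanently holds the link TRB, and
 * one more slot is always kept empty so that enq_ptr == deq_ptr can only
 * mean "ring empty" rather than "ring full".
 */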
static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
{
	if (ep->deq_ptr > ep->enq_ptr)
		return ep->deq_ptr - ep->enq_ptr - 1;

	return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
}

static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
				     struct tegra_xudc_request *req,
				     struct tegra_xudc_trb *trb,
				     bool ioc)
{
	struct tegra_xudc *xudc = ep->xudc;
	dma_addr_t buf_addr;
	size_t len;

	len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
		    req->buf_queued);
	if (len > 0)
		buf_addr = req->usb_req.dma + req->buf_queued;
	else
		buf_addr = 0;

	trb_write_data_ptr(trb, buf_addr);

	trb_write_transfer_len(trb, len);
	trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

	if (req->trbs_queued == req->trbs_needed - 1 ||
	    (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
		trb_write_chain(trb, 0);
	else
		trb_write_chain(trb, 1);

	trb_write_ioc(trb, ioc);

	if (usb_endpoint_dir_out(ep->desc) ||
	    (usb_endpoint_xfer_control(ep->desc) &&
	     (xudc->setup_state == DATA_STAGE_RECV)))
		trb_write_isp(trb, 1);
	else
		trb_write_isp(trb, 0);

	if (usb_endpoint_xfer_control(ep->desc)) {
		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == DATA_STAGE_RECV)
			trb_write_type(trb, TRB_TYPE_DATA_STAGE);
		else
			trb_write_type(trb, TRB_TYPE_STATUS_STAGE);

		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == STATUS_STAGE_XFER)
			trb_write_data_stage_dir(trb, 1);
		else
			trb_write_data_stage_dir(trb, 0);
	} else if (usb_endpoint_xfer_isoc(ep->desc)) {
		trb_write_type(trb, TRB_TYPE_ISOCH);
		trb_write_sia(trb, 1);
		trb_write_frame_id(trb, 0);
		trb_write_tlbpc(trb, 0);
	} else if (usb_ss_max_streams(ep->comp_desc)) {
		trb_write_type(trb, TRB_TYPE_STREAM);
		trb_write_stream_id(trb, req->usb_req.stream_id);
	} else {
		trb_write_type(trb, TRB_TYPE_NORMAL);
		trb_write_stream_id(trb, 0);
	}

	trb_write_cycle(trb, ep->pcs);

	req->trbs_queued++;
	req->buf_queued += len;

	dump_trb(xudc, "TRANSFER", trb);
}

static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
					  struct tegra_xudc_request *req)
{
	unsigned int i, count, available;
	bool wait_td = false;

	available = ep_available_trbs(ep);
	count = req->trbs_needed - req->trbs_queued;
	if (available < count) {
		count = available;
		ep->ring_full = true;
	}

	/*
	 * To generate a zero-length packet on the USB bus, SW needs to
	 * schedule a standalone zero-length TD. Per the HW's behavior, SW
	 * needs to schedule TDs in different ways for different endpoint
	 * types.
	 *
	 * For control endpoints:
	 * - Data stage TD (IOC = 1, CH = 0)
	 * - Ring doorbell and wait for the transfer event
	 * - Data stage TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 *
	 * For bulk and interrupt endpoints:
	 * - Normal transfer TD (IOC = 0, CH = 0)
	 * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 */

	if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
		wait_td = true;

	if (!req->first_trb)
		req->first_trb = &ep->transfer_ring[ep->enq_ptr];

	for (i = 0; i < count; i++) {
		struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
		bool ioc = false;

		if ((i == count - 1) || (wait_td && i == count - 2))
			ioc = true;

		tegra_xudc_queue_one_trb(ep, req, trb, ioc);
		req->last_trb = trb;

		ep->enq_ptr++;
		if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
			trb = &ep->transfer_ring[ep->enq_ptr];
			trb_write_cycle(trb, ep->pcs);
			ep->pcs = !ep->pcs;
			ep->enq_ptr = 0;
		}

		if (ioc)
			break;
	}

	return count;
}

static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;
	u32 val;

	if (list_empty(&ep->queue))
		return;

	val = DB_TARGET(ep->index);
	if (usb_endpoint_xfer_control(ep->desc)) {
		val |= DB_STREAMID(xudc->setup_seq_num);
	} else if (usb_ss_max_streams(ep->comp_desc) > 0) {
		struct tegra_xudc_request *req;

		/* Don't ring doorbell if the stream has been rejected. */
		if (ep->stream_rejected)
			return;

		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		val |= DB_STREAMID(req->usb_req.stream_id);
	}

	dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
	xudc_writel(xudc, val, DB);
}

static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc_request *req;
	bool trbs_queued = false;

	list_for_each_entry(req, &ep->queue, list) {
		if (ep->ring_full)
			break;

		if (tegra_xudc_queue_trbs(ep, req) > 0)
			trbs_queued = true;
	}

	if (trbs_queued)
		tegra_xudc_ep_ring_doorbell(ep);
}

static int
__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	int err;

	if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "control EP has pending transfers\n");
		return -EINVAL;
	}

	if (usb_endpoint_xfer_control(ep->desc)) {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     (xudc->setup_state ==
					      DATA_STAGE_XFER));
	} else {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     usb_endpoint_dir_in(ep->desc));
	}

	if (err < 0) {
		dev_err(xudc->dev, "failed to map request: %d\n", err);
		return err;
	}

	req->first_trb = NULL;
	req->last_trb = NULL;
	req->buf_queued = 0;
	req->trbs_queued = 0;
	req->need_zlp = false;
	req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
					XUDC_TRB_MAX_BUFFER_SIZE);
	if (req->usb_req.length == 0)
		req->trbs_needed++;

	if (!usb_endpoint_xfer_isoc(ep->desc) &&
	    req->usb_req.zero && req->usb_req.length &&
	    ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
		req->trbs_needed++;
		req->need_zlp = true;
	}

	req->usb_req.status = -EINPROGRESS;
	req->usb_req.actual = 0;

	list_add_tail(&req->list, &ep->queue);

	tegra_xudc_ep_kick_queue(ep);

	return 0;
}

static int
tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
		    gfp_t gfp)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_queue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
				  struct tegra_xudc_request *req)
{
	struct tegra_xudc_trb *trb = req->first_trb;
	bool pcs_enq = trb_read_cycle(trb);
	bool pcs;

	/*
	 * Clear out all the TRBs that are part of or follow the cancelled
	 * request, restoring each TRB's cycle bit to its un-enqueued state.
	 */
	while (trb != &ep->transfer_ring[ep->enq_ptr]) {
		pcs = trb_read_cycle(trb);
		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ep->transfer_ring;
	}

	/* Requests will be re-queued at the start of the cancelled request. */
	ep->enq_ptr = req->first_trb - ep->transfer_ring;
	/*
	 * Retrieve the correct cycle bit state from the first trb of
	 * the cancelled request.
	 */
	ep->pcs = pcs_enq;
	ep->ring_full = false;
	list_for_each_entry_continue(req, &ep->queue, list) {
		req->usb_req.status = -EINPROGRESS;
		req->usb_req.actual = 0;

		req->first_trb = NULL;
		req->last_trb = NULL;
		req->buf_queued = 0;
		req->trbs_queued = 0;
	}
}

/*
 * Determine if the given TRB is in the range [first trb, last trb] for the
 * given request.
 */
static bool trb_in_request(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_request *req,
			   struct tegra_xudc_trb *trb)
{
	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
		req->first_trb, req->last_trb, trb);

	if (trb >= req->first_trb && (trb <= req->last_trb ||
				      req->last_trb < req->first_trb))
		return true;

	if (trb < req->first_trb && trb <= req->last_trb &&
	    req->last_trb < req->first_trb)
		return true;

	return false;
}

/*
 * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
 * for the given endpoint and request.
 */
static bool trb_before_request(struct tegra_xudc_ep *ep,
			       struct tegra_xudc_request *req,
			       struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
		__func__, req->first_trb, req->last_trb, enq_trb, trb);

	if (trb < req->first_trb && (enq_trb <= trb ||
				     req->first_trb < enq_trb))
		return true;

	if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
		return true;

	return false;
}

static int
__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
			struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	struct tegra_xudc_request *r = NULL, *iter;
	struct tegra_xudc_trb *deq_trb;
	bool busy, kick_queue = false;
	int ret = 0;

	/* Make sure the request is actually queued to this endpoint. */
	list_for_each_entry(iter, &ep->queue, list) {
		if (iter != req)
			continue;
		r = iter;
		break;
	}

	if (!r)
		return -EINVAL;

	/* Request hasn't been queued in the transfer ring yet. */
	if (!req->trbs_queued) {
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		return 0;
	}

	/* Halt DMA for this endpoint. */
	if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
		ep_pause(xudc, ep->index);
		ep_wait_for_inactive(xudc, ep->index);
	}

	deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
	/* Is the hardware processing the TRB at the dequeue pointer? */
	busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));

	if (trb_in_request(ep, req, deq_trb) && busy) {
		/*
		 * Request has been partially completed or it hasn't
		 * started processing yet.
		 */
		dma_addr_t deq_ptr;

		squeeze_transfer_ring(ep, req);

		req->usb_req.actual = ep_ctx_read_edtla(ep->context);
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;

		/* EDTLA is > 0: request has been partially completed */
		if (req->usb_req.actual > 0) {
			/*
			 * Abort the pending transfer and update the dequeue
			 * pointer
			 */
			ep_ctx_write_edtla(ep->context, 0);
			ep_ctx_write_partial_td(ep->context, 0);
			ep_ctx_write_data_offset(ep->context, 0);

			deq_ptr = trb_virt_to_phys(ep,
					&ep->transfer_ring[ep->enq_ptr]);

			if (dma_mapping_error(xudc->dev, deq_ptr)) {
				ret = -EINVAL;
			} else {
				ep_ctx_write_deq_ptr(ep->context, deq_ptr);
				ep_ctx_write_dcs(ep->context, ep->pcs);
				ep_reload(xudc, ep->index);
			}
		}
	} else if (trb_before_request(ep, req, deq_trb) && busy) {
		/* Request hasn't started processing yet. */
		squeeze_transfer_ring(ep, req);

		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;
	} else {
		/*
		 * Request has completed, but we haven't processed the
		 * completion event yet.
		 */
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		ret = -EINVAL;
	}

	/* Resume the endpoint. */
	ep_unpause(xudc, ep->index);

	if (kick_queue)
		tegra_xudc_ep_kick_queue(ep);

	return ret;
}

static int
tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_dequeue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (!ep->desc)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(ep->desc)) {
		dev_err(xudc->dev, "can't halt isochronous EP\n");
		return -ENOTSUPP;
	}

	if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
		dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
			halt ? "halted" : "not halted");
		return 0;
	}

	if (halt) {
		ep_halt(xudc, ep->index);
	} else {
		ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

		ep_reload(xudc, ep->index);

		ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
		ep_ctx_write_rsvd(ep->context, 0);
		ep_ctx_write_partial_td(ep->context, 0);
		ep_ctx_write_splitxstate(ep->context, 0);
		ep_ctx_write_seq_num(ep->context, 0);

		ep_reload(xudc, ep->index);
		ep_unpause(xudc, ep->index);
		ep_unhalt(xudc, ep->index);

		tegra_xudc_ep_ring_doorbell(ep);
	}

	return 0;
}

static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	if (value && usb_endpoint_dir_in(ep->desc) &&
	    !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "can't halt EP with requests pending\n");
		ret = -EAGAIN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_set_halt(ep, value);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
{
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	struct tegra_xudc *xudc = ep->xudc;
	u16 maxpacket, maxburst = 0, esit = 0;
	u32 val;

	maxpacket = usb_endpoint_maxp(desc);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (!usb_endpoint_xfer_control(desc))
			maxburst = comp_desc->bMaxBurst;

		if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
			esit = le16_to_cpu(comp_desc->wBytesPerInterval);
	} else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		   (usb_endpoint_xfer_int(desc) ||
		    usb_endpoint_xfer_isoc(desc))) {
		if (xudc->gadget.speed == USB_SPEED_HIGH) {
			maxburst = usb_endpoint_maxp_mult(desc) - 1;
			if (maxburst == 0x3) {
				dev_warn(xudc->dev,
					 "invalid endpoint maxburst\n");
				maxburst = 0x2;
			}
		}
		esit = maxpacket * (maxburst + 1);
	}

	memset(ep->context, 0, sizeof(*ep->context));

	ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
	ep_ctx_write_interval(ep->context, desc->bInterval);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (usb_endpoint_xfer_isoc(desc)) {
			ep_ctx_write_mult(ep->context,
					  comp_desc->bmAttributes & 0x3);
		}

		if (usb_endpoint_xfer_bulk(desc)) {
			ep_ctx_write_max_pstreams(ep->context,
						  comp_desc->bmAttributes &
						  0x1f);
			ep_ctx_write_lsa(ep->context, 1);
		}
	}

	if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
		val = usb_endpoint_type(desc);
	else
		val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;

	ep_ctx_write_type(ep->context, val);
	ep_ctx_write_cerr(ep->context, 0x3);
	ep_ctx_write_max_packet_size(ep->context, maxpacket);
	ep_ctx_write_max_burst_size(ep->context, maxburst);

	ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
	ep_ctx_write_dcs(ep->context, ep->pcs);

	/* Select a reasonable average TRB length based on endpoint type. */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		val = 8;
		break;
	case USB_ENDPOINT_XFER_INT:
		val = 1024;
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_ISOC:
	default:
		val = 3072;
		break;
	}

	ep_ctx_write_avg_trb_len(ep->context, val);
	ep_ctx_write_max_esit_payload(ep->context, esit);

	ep_ctx_write_cerrcnt(ep->context, 0x3);
}

static void setup_link_trb(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_trb *trb)
{
	trb_write_data_ptr(trb, ep->transfer_ring_phys);
	trb_write_type(trb, TRB_TYPE_LINK);
	trb_write_toggle_cycle(trb, 1);
}

static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
		dev_err(xudc->dev, "endpoint %u already disabled\n",
			ep->index);
		return -EINVAL;
	}

	ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

	ep_reload(xudc, ep->index);

	tegra_xudc_ep_nuke(ep, -ESHUTDOWN);

	xudc->nr_enabled_eps--;
	if (usb_endpoint_xfer_isoc(ep->desc))
		xudc->nr_isoch_eps--;

	ep->desc = NULL;
	ep->comp_desc = NULL;

	memset(ep->context, 0, sizeof(*ep->context));

	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);
	if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
		xudc_writel(xudc, BIT(ep->index), EP_STOPPED);

	/*
	 * If this is the last endpoint disabled in a de-configure request,
	 * switch back to address state.
	 */
	if ((xudc->device_state == USB_STATE_CONFIGURED) &&
	    (xudc->nr_enabled_eps == 1)) {
		u32 val;

		xudc->device_state = USB_STATE_ADDRESS;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);

		val = xudc_readl(xudc, CTRL);
		val &= ~CTRL_RUN;
		xudc_writel(xudc, val, CTRL);
	}

	dev_info(xudc->dev, "ep %u disabled\n", ep->index);

	return 0;
}

static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_disable(ep);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc *xudc = ep->xudc;
	unsigned int i;
	u32 val;

	if (xudc->gadget.speed == USB_SPEED_SUPER &&
	    !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
		return -EINVAL;

	/* Disable the EP if it is not disabled */
	if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
		__tegra_xudc_ep_disable(ep);

	ep->desc = desc;
	ep->comp_desc = ep->usb_ep.comp_desc;

	if (usb_endpoint_xfer_isoc(desc)) {
		if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
			dev_err(xudc->dev, "too many isochronous endpoints\n");
			return -EBUSY;
		}
		xudc->nr_isoch_eps++;
	}

	memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
	       sizeof(*ep->transfer_ring));
	setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);

	ep->enq_ptr = 0;
	ep->deq_ptr = 0;
	ep->pcs = true;
	ep->ring_full = false;
	xudc->nr_enabled_eps++;

	tegra_xudc_ep_context_setup(ep);

	/*
	 * No need to reload and un-halt EP0. This will be done automatically
	 * once a valid SETUP packet is received.
	 */
	if (usb_endpoint_xfer_control(desc))
		goto out;

	/*
	 * Transition to configured state once the first non-control
	 * endpoint is enabled.
	 */
	if (xudc->device_state == USB_STATE_ADDRESS) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_RUN;
		xudc_writel(xudc, val, CTRL);

		xudc->device_state = USB_STATE_CONFIGURED;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
	}

	if (usb_endpoint_xfer_isoc(desc)) {
		/*
		 * Pause all bulk endpoints when enabling an isoch endpoint
		 * to ensure the isoch endpoint is allocated enough bandwidth.
		 */
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_pause(xudc, i);
		}
	}

	ep_reload(xudc, ep->index);
	ep_unpause(xudc, ep->index);
	ep_unhalt(xudc, ep->index);

	if (usb_endpoint_xfer_isoc(desc)) {
		for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
			if (xudc->ep[i].desc &&
			    usb_endpoint_xfer_bulk(xudc->ep[i].desc))
				ep_unpause(xudc, i);
		}
	}

out:
	dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
		 usb_ep_type_string(usb_endpoint_type(ep->desc)),
		 usb_endpoint_dir_in(ep->desc) ? "in" : "out");

	return 0;
}

static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_enable(ep, desc);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static struct usb_request *
tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
{
	struct tegra_xudc_request *req;

	req = kzalloc(sizeof(*req), gfp);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->list);

	return &req->usb_req;
}

static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
				       struct usb_request *usb_req)
{
	struct tegra_xudc_request *req = to_xudc_req(usb_req);

	kfree(req);
}

static const struct usb_ep_ops tegra_xudc_ep_ops = {
	.enable = tegra_xudc_ep_enable,
	.disable = tegra_xudc_ep_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};

/* EP0 is managed by the driver itself, so refuse gadget-side (re)config. */
static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	return -EBUSY;
}

static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
{
	return -EBUSY;
}

static const struct usb_ep_ops tegra_xudc_ep0_ops = {
	.enable = tegra_xudc_ep0_enable,
	.disable = tegra_xudc_ep0_disable,
	.alloc_request = tegra_xudc_ep_alloc_request,
	.free_request = tegra_xudc_ep_free_request,
	.queue = tegra_xudc_ep_queue,
	.dequeue = tegra_xudc_ep_dequeue,
	.set_halt = tegra_xudc_ep_set_halt,
};

static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
		MFINDEX_FRAME_SHIFT;
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
{
	unsigned int i;
	u32 val;

	ep_unpause_all(xudc);

	/* Direct link to U0. */
	val = xudc_readl(xudc, PORTSC);
	if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	if (xudc->device_state == USB_STATE_SUSPENDED) {
		xudc->device_state = xudc->resume_state;
		usb_gadget_set_state(&xudc->gadget, xudc->device_state);
		xudc->resume_state = 0;
	}

	/*
	 * Doorbells may be dropped if they are sent too soon (< ~200ns)
	 * after unpausing the endpoint. Wait for 500ns just to be safe.
	 */
	ndelay(500);
	for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
		tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
}

static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	int ret = 0;
	u32 val;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	val = xudc_readl(xudc, PORTPM);
	dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
		val, gadget->speed);

	if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
	     (val & PORTPM_RWE)) ||
	    ((xudc->gadget.speed == USB_SPEED_SUPER) &&
	     (val & PORTPM_FRWE))) {
		tegra_xudc_resume_device_state(xudc);

		/* Send Device Notification packet. */
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
				| DEVNOTIF_LO_TRIG;
			xudc_writel(xudc, 0, DEVNOTIF_HI);
			xudc_writel(xudc, val, DEVNOTIF_LO);
		}
	}

unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (is_on != xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		if (is_on)
			val |= CTRL_ENABLE;
		else
			val &= ~CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	xudc->pullup = is_on;
	dev_dbg(xudc->dev, "%s: pullup:%d\n", __func__, is_on);

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
				   struct usb_gadget_driver *driver)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	int ret;
	unsigned int i;

	if (!driver)
		return -EINVAL;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->driver) {
		ret = -EBUSY;
		goto unlock;
	}

	xudc->setup_state = WAIT_FOR_SETUP;
	xudc->device_state = USB_STATE_DEFAULT;
	usb_gadget_set_state(&xudc->gadget, xudc->device_state);

	ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
	if (ret < 0)
		goto unlock;

	val = xudc_readl(xudc, CTRL);
	val |= CTRL_IE | CTRL_LSE;
	xudc_writel(xudc, val, CTRL);

	val = xudc_readl(xudc, PORTHALT);
	val |= PORTHALT_STCHG_INTR_EN;
	xudc_writel(xudc, val, PORTHALT);

	if (xudc->pullup) {
		val = xudc_readl(xudc, CTRL);
		val |= CTRL_ENABLE;
		xudc_writel(xudc, val, CTRL);
	}

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, gadget);

	xudc->driver = driver;
unlock:
	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return ret;
}

static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
{
	struct tegra_xudc *xudc = to_xudc(gadget);
	unsigned long flags;
	u32 val;
	unsigned int i;

	pm_runtime_get_sync(xudc->dev);

	spin_lock_irqsave(&xudc->lock, flags);

	for (i = 0; i < xudc->soc->num_phys; i++)
		if (xudc->usbphy[i])
			otg_set_peripheral(xudc->usbphy[i]->otg, NULL);

	val = xudc_readl(xudc, CTRL);
	val &= ~(CTRL_IE | CTRL_ENABLE);
	xudc_writel(xudc, val, CTRL);

	__tegra_xudc_ep_disable(&xudc->ep[0]);

	xudc->driver = NULL;
	dev_dbg(xudc->dev, "Gadget stopped\n");

	spin_unlock_irqrestore(&xudc->lock, flags);

	pm_runtime_put(xudc->dev);

	return 0;
}

static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
				       unsigned int m_a)
{
	int ret = 0;
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);

	if (xudc->curr_usbphy->chg_type == SDP_TYPE)
		ret = usb_phy_set_power(xudc->curr_usbphy, m_a);

	return ret;
}

static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
	struct tegra_xudc *xudc = to_xudc(gadget);

	dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
	xudc->selfpowered = !!is_on;

	return 0;
}

static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
	.get_frame = tegra_xudc_gadget_get_frame,
	.wakeup = tegra_xudc_gadget_wakeup,
	.pullup = tegra_xudc_gadget_pullup,
	.udc_start = tegra_xudc_gadget_start,
	.udc_stop = tegra_xudc_gadget_stop,
	.vbus_draw = tegra_xudc_gadget_vbus_draw,
	.set_selfpowered = tegra_xudc_set_selfpowered,
};

static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
{
}

static int
tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
			    void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = NULL;
	xudc->ep0_req->usb_req.dma = 0;
	xudc->ep0_req->usb_req.length = 0;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}

static int
tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
			  void (*cmpl)(struct usb_ep *, struct usb_request *))
{
	xudc->ep0_req->usb_req.buf = buf;
	xudc->ep0_req->usb_req.length = len;
	xudc->ep0_req->usb_req.complete = cmpl;
	xudc->ep0_req->usb_req.context = xudc;

	return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
}
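
/*
 * Control transfer state machine: a completed data stage queues the
 * opposite-direction status stage (DATA_STAGE_XFER -> STATUS_STAGE_RECV,
 * DATA_STAGE_RECV -> STATUS_STAGE_XFER); once a status stage completes,
 * the state returns to WAIT_FOR_SETUP.
 */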
static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
{
	switch (xudc->setup_state) {
	case DATA_STAGE_XFER:
		xudc->setup_state = STATUS_STAGE_RECV;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	case DATA_STAGE_RECV:
		xudc->setup_state = STATUS_STAGE_XFER;
		tegra_xudc_ep0_queue_status(xudc, no_op_complete);
		break;
	default:
		xudc->setup_state = WAIT_FOR_SETUP;
		break;
	}
}

static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
				       struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&xudc->lock);
	ret = xudc->driver->setup(&xudc->gadget, ctrl);
	spin_lock(&xudc->lock);

	return ret;
}

static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct tegra_xudc *xudc = req->context;

	if (xudc->test_mode_pattern) {
		xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
		xudc->test_mode_pattern = 0;
	}
}

static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
				      struct usb_ctrlrequest *ctrl)
{
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	u32 feature = le16_to_cpu(ctrl->wValue);
	u32 index = le16_to_cpu(ctrl->wIndex);
	u32 val, ep;
	int ret;

	if (le16_to_cpu(ctrl->wLength) != 0)
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (feature) {
		case USB_DEVICE_REMOTE_WAKEUP:
			if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
			    (xudc->device_state == USB_STATE_DEFAULT))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if (set)
				val |= PORTPM_RWE;
			else
				val &= ~PORTPM_RWE;

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
			if ((xudc->device_state != USB_STATE_CONFIGURED) ||
			    (xudc->gadget.speed != USB_SPEED_SUPER))
				return -EINVAL;

			val = xudc_readl(xudc, PORTPM);
			if ((feature == USB_DEVICE_U1_ENABLE) &&
			    xudc->soc->u1_enable) {
				if (set)
					val |= PORTPM_U1E;
				else
					val &= ~PORTPM_U1E;
			}

			if ((feature == USB_DEVICE_U2_ENABLE) &&
			    xudc->soc->u2_enable) {
				if (set)
					val |= PORTPM_U2E;
				else
					val &= ~PORTPM_U2E;
			}

			xudc_writel(xudc, val, PORTPM);
			break;
		case USB_DEVICE_TEST_MODE:
			if (xudc->gadget.speed != USB_SPEED_HIGH)
				return -EINVAL;

			if (!set)
				return -EINVAL;

			xudc->test_mode_pattern = index >> 8;
			break;
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_INTERFACE:
		if (xudc->device_state != USB_STATE_CONFIGURED)
			return -EINVAL;

		switch (feature) {
		case USB_INTRF_FUNC_SUSPEND:
			if (set) {
				val = xudc_readl(xudc, PORTPM);

				if (index & USB_INTRF_FUNC_SUSPEND_RW)
					val |= PORTPM_FRWE;
				else
					val &= ~PORTPM_FRWE;

				xudc_writel(xudc, val, PORTPM);
			}

			return tegra_xudc_ep0_delegate_req(xudc, ctrl);
		default:
			return -EINVAL;
		}

		break;
	case USB_RECIP_ENDPOINT:
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);

		if ((xudc->device_state == USB_STATE_DEFAULT) ||
		    ((xudc->device_state == USB_STATE_ADDRESS) &&
		     (ep != 0)))
			return -EINVAL;

		ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
}

static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
				     struct usb_ctrlrequest *ctrl)
{
	struct tegra_xudc_ep_context *ep_ctx;
	u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
	u16 status = 0;

	if (!(ctrl->bRequestType & USB_DIR_IN))
		return -EINVAL;

	if ((le16_to_cpu(ctrl->wValue) != 0) ||
	    (le16_to_cpu(ctrl->wLength) != 2))
		return -EINVAL;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		val = xudc_readl(xudc, PORTPM);

		if (xudc->selfpowered)
			status |= BIT(USB_DEVICE_SELF_POWERED);

		if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		    (val & PORTPM_RWE))
			status |= BIT(USB_DEVICE_REMOTE_WAKEUP);

		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			if (val & PORTPM_U1E)
				status |= BIT(USB_DEV_STAT_U1_ENABLED);
			if (val & PORTPM_U2E)
				status |= BIT(USB_DEV_STAT_U2_ENABLED);
		}
		break;
	case USB_RECIP_INTERFACE:
		if (xudc->gadget.speed == USB_SPEED_SUPER) {
			status |= USB_INTRF_STAT_FUNC_RW_CAP;
			val = xudc_readl(xudc, PORTPM);
			if (val & PORTPM_FRWE)
				status |= USB_INTRF_STAT_FUNC_RW;
		}
		break;
	case USB_RECIP_ENDPOINT:
		ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
			((index & USB_DIR_IN) ? 1 : 0);
		ep_ctx = &xudc->ep_context[ep];

		if ((xudc->device_state != USB_STATE_CONFIGURED) &&
		    ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
			return -EINVAL;

		if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
			return -EINVAL;

		if (xudc_readl(xudc, EP_HALT) & BIT(ep))
			status |= BIT(USB_ENDPOINT_HALT);
		break;
	default:
		return -EINVAL;
	}

	xudc->status_buf = cpu_to_le16(status);
	return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
					 sizeof(xudc->status_buf),
					 no_op_complete);
}
static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
{
        /* Nothing to do with SEL values */
}
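/*
 * SET_SEL carries a 6-byte data stage with the host's U1/U2 system exit
 * latency values; they are received into sel_timing but not programmed into
 * the controller.
 */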
static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
                                  struct usb_ctrlrequest *ctrl)
{
        if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
                                   USB_TYPE_STANDARD))
                return -EINVAL;

        if (xudc->device_state == USB_STATE_DEFAULT)
                return -EINVAL;

        if ((le16_to_cpu(ctrl->wIndex) != 0) ||
            (le16_to_cpu(ctrl->wValue) != 0) ||
            (le16_to_cpu(ctrl->wLength) != 6))
                return -EINVAL;

        return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
                                         sizeof(xudc->sel_timing),
                                         set_sel_complete);
}
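/*
 * SET_ISOCH_DELAY only latches the host-supplied delay value; no hardware
 * programming is needed.
 */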
static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
{
        /* Nothing to do with isoch delay */
}
static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
                                          struct usb_ctrlrequest *ctrl)
{
        u32 delay = le16_to_cpu(ctrl->wValue);

        if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
                                   USB_TYPE_STANDARD))
                return -EINVAL;

        if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
            (le16_to_cpu(ctrl->wLength) != 0))
                return -EINVAL;

        xudc->isoch_delay = delay;

        return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
}
static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct tegra_xudc *xudc = req->context;

        if ((xudc->device_state == USB_STATE_DEFAULT) &&
            (xudc->dev_addr != 0)) {
                xudc->device_state = USB_STATE_ADDRESS;
                usb_gadget_set_state(&xudc->gadget, xudc->device_state);
        } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
                   (xudc->dev_addr == 0)) {
                xudc->device_state = USB_STATE_DEFAULT;
                usb_gadget_set_state(&xudc->gadget, xudc->device_state);
        }
}
static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
                                      struct usb_ctrlrequest *ctrl)
{
        struct tegra_xudc_ep *ep0 = &xudc->ep[0];
        u32 val, addr = le16_to_cpu(ctrl->wValue);

        if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
                                   USB_TYPE_STANDARD))
                return -EINVAL;

        if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
            (le16_to_cpu(ctrl->wLength) != 0))
                return -EINVAL;

        if (xudc->device_state == USB_STATE_CONFIGURED)
                return -EINVAL;

        dev_dbg(xudc->dev, "set address: %u\n", addr);

        xudc->dev_addr = addr;
        val = xudc_readl(xudc, CTRL);
        val &= ~(CTRL_DEVADDR_MASK);
        val |= CTRL_DEVADDR(addr);
        xudc_writel(xudc, val, CTRL);

        ep_ctx_write_devaddr(ep0->context, addr);

        return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
}
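/*
 * Dispatch the standard requests that must (or can) be handled by the UDC
 * itself; anything unrecognized is delegated to the gadget driver.
 */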
static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
                                       struct usb_ctrlrequest *ctrl)
{
        int ret;

        switch (ctrl->bRequest) {
        case USB_REQ_GET_STATUS:
                dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
                ret = tegra_xudc_ep0_get_status(xudc, ctrl);
                break;
        case USB_REQ_SET_ADDRESS:
                dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
                ret = tegra_xudc_ep0_set_address(xudc, ctrl);
                break;
        case USB_REQ_SET_SEL:
                dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
                ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
                break;
        case USB_REQ_SET_ISOCH_DELAY:
                dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
                ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
                break;
        case USB_REQ_CLEAR_FEATURE:
        case USB_REQ_SET_FEATURE:
                dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
                ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
                break;
        case USB_REQ_SET_CONFIGURATION:
                dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
                /*
                 * In theory the RUN bit should be cleared before the status
                 * stage of a deconfigure request goes out, but doing so has
                 * proven problematic. Clear RUN once all endpoints are
                 * disabled instead.
                 */
                fallthrough;
        default:
                ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
                break;
        }

        return ret;
}
static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
                                               struct usb_ctrlrequest *ctrl,
                                               u16 seq_num)
{
        int ret;

        xudc->setup_seq_num = seq_num;

        /* Ensure EP0 is unhalted. */
        ep_unhalt(xudc, 0);

        /*
         * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
         * are invalid. Halt EP0 until we get a valid packet.
         */
        if (xudc->soc->invalid_seq_num &&
            (seq_num == 0xfffe || seq_num == 0xffff)) {
                dev_warn(xudc->dev, "invalid sequence number detected\n");
                ep_halt(xudc, 0);
                return;
        }

        if (ctrl->wLength)
                xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
                        DATA_STAGE_XFER : DATA_STAGE_RECV;
        else
                xudc->setup_state = STATUS_STAGE_XFER;

        if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
                ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
        else
                ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);

        if (ret < 0) {
                dev_warn(xudc->dev, "setup request failed: %d\n", ret);
                xudc->setup_state = WAIT_FOR_SETUP;
                ep_halt(xudc, 0);
        }
}
static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
                                        struct tegra_xudc_trb *event)
{
        struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
        u16 seq_num = trb_read_seq_num(event);

        if (xudc->setup_state != WAIT_FOR_SETUP) {
                /*
                 * The controller is in the process of handling another
                 * setup request. Queue subsequent requests and handle
                 * the last one once the controller reports a sequence
                 * number error.
                 */
                memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
                xudc->setup_packet.seq_num = seq_num;
                xudc->queued_setup_packet = true;
        } else {
                tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
        }
}
static struct tegra_xudc_request *
trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
{
        struct tegra_xudc_request *req;

        list_for_each_entry(req, &ep->queue, list) {
                if (!req->trbs_queued)
                        break;

                if (trb_in_request(ep, req, trb))
                        return req;
        }

        return NULL;
}
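/*
 * A transfer event points at the TRB that generated it; map that TRB back
 * to the owning request so the request can be completed once its TD has
 * finished.
 */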
static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
                                                  struct tegra_xudc_ep *ep,
                                                  struct tegra_xudc_trb *event)
{
        struct tegra_xudc_request *req;
        struct tegra_xudc_trb *trb;
        bool short_packet;

        short_packet = (trb_read_cmpl_code(event) ==
                        TRB_CMPL_CODE_SHORT_PACKET);

        trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
        req = trb_to_request(ep, trb);

        /*
         * TDs are complete on short packet or when the completed TRB is the
         * last TRB in the TD (the CHAIN bit is unset).
         */
        if (req && (short_packet || (!trb_read_chain(trb) &&
                (req->trbs_needed == req->trbs_queued)))) {
                struct tegra_xudc_trb *last = req->last_trb;
                unsigned int residual;

                residual = trb_read_transfer_len(event);
                req->usb_req.actual = req->usb_req.length - residual;

                dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
                        req->usb_req.actual, req->usb_req.length);

                tegra_xudc_req_done(ep, req, 0);

                if (ep->desc && usb_endpoint_xfer_control(ep->desc))
                        tegra_xudc_ep0_req_done(xudc);

                /*
                 * Advance the dequeue pointer past the end of the current TD
                 * on short packet completion.
                 */
                if (short_packet) {
                        ep->deq_ptr = (last - ep->transfer_ring) + 1;
                        if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
                                ep->deq_ptr = 0;
                }
        } else if (!req) {
                dev_warn(xudc->dev, "transfer event on dequeued request\n");
        }

        tegra_xudc_ep_kick_queue(ep);
}
static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
                                             struct tegra_xudc_trb *event)
{
        unsigned int ep_index = trb_read_endpoint_id(event);
        struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
        struct tegra_xudc_trb *trb;
        u16 comp_code;

        if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
                dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
                         ep_index);
                return;
        }

        /* Update transfer ring dequeue pointer. */
        trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
        comp_code = trb_read_cmpl_code(event);
        if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
                ep->deq_ptr = (trb - ep->transfer_ring) + 1;

                if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
                        ep->deq_ptr = 0;
                ep->ring_full = false;
        }

        switch (comp_code) {
        case TRB_CMPL_CODE_SUCCESS:
        case TRB_CMPL_CODE_SHORT_PACKET:
                tegra_xudc_handle_transfer_completion(xudc, ep, event);
                break;
        case TRB_CMPL_CODE_HOST_REJECTED:
                dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);

                ep->stream_rejected = true;
                break;
        case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
                dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);

                if (ep->stream_rejected) {
                        ep->stream_rejected = false;
                        /*
                         * An EP is stopped when a stream is rejected. Wait
                         * for the EP to report that it is stopped and then
                         * un-stop it.
                         */
                        ep_wait_for_stopped(xudc, ep_index);
                }
                tegra_xudc_ep_ring_doorbell(ep);
                break;
        case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
                /*
                 * Wait for the EP to be stopped so the controller stops
                 * processing doorbells.
                 */
                ep_wait_for_stopped(xudc, ep_index);
                ep->enq_ptr = ep->deq_ptr;
                tegra_xudc_ep_nuke(ep, -EIO);
                fallthrough;
        case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
        case TRB_CMPL_CODE_CTRL_DIR_ERR:
        case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
        case TRB_CMPL_CODE_RING_UNDERRUN:
        case TRB_CMPL_CODE_RING_OVERRUN:
        case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
        case TRB_CMPL_CODE_USB_TRANS_ERR:
        case TRB_CMPL_CODE_TRB_ERR:
                dev_err(xudc->dev, "completion error %#x on EP %u\n",
                        comp_code, ep_index);

                ep_halt(xudc, ep_index);
                break;
        case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
                dev_info(xudc->dev, "sequence number error\n");

                /*
                 * Kill any queued control request and skip to the last
                 * setup packet we received.
                 */
                tegra_xudc_ep_nuke(ep, -EINVAL);
                xudc->setup_state = WAIT_FOR_SETUP;
                if (!xudc->queued_setup_packet)
                        break;

                tegra_xudc_handle_ep0_setup_packet(xudc,
                                                   &xudc->setup_packet.ctrl_req,
                                                   xudc->setup_packet.seq_num);
                xudc->queued_setup_packet = false;
                break;
        case TRB_CMPL_CODE_STOPPED:
                dev_dbg(xudc->dev, "stop completion code on EP %u\n",
                        ep_index);

                /* Disconnected. */
                tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
                break;
        default:
                dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
                        comp_code, ep_index);
                break;
        }
}
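/*
 * Reset the software state of the controller: abort all outstanding
 * requests and bring the EP0 context and transfer ring back to a clean
 * state so the next setup packet starts from sequence number zero.
 */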
static void tegra_xudc_reset(struct tegra_xudc *xudc)
{
        struct tegra_xudc_ep *ep0 = &xudc->ep[0];
        dma_addr_t deq_ptr;
        unsigned int i;

        xudc->setup_state = WAIT_FOR_SETUP;
        xudc->device_state = USB_STATE_DEFAULT;
        usb_gadget_set_state(&xudc->gadget, xudc->device_state);

        ep_unpause_all(xudc);

        for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
                tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);

        /*
         * Reset sequence number and dequeue pointer to flush the transfer
         * ring.
         */
        ep0->deq_ptr = ep0->enq_ptr;
        ep0->ring_full = false;

        xudc->setup_seq_num = 0;
        xudc->queued_setup_packet = false;

        ep_ctx_write_rsvd(ep0->context, 0);
        ep_ctx_write_partial_td(ep0->context, 0);
        ep_ctx_write_splitxstate(ep0->context, 0);
        ep_ctx_write_seq_num(ep0->context, 0);

        deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);

        if (!dma_mapping_error(xudc->dev, deq_ptr)) {
                ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
                ep_ctx_write_dcs(ep0->context, ep0->pcs);
        }

        ep_unhalt_all(xudc);

        ep_unpause(xudc, 0);
}
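/*
 * A connection was detected: latch the negotiated speed from PORTSC.PS and
 * size EP0 accordingly (512-byte max packet for SuperSpeed, 64 otherwise).
 */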
static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
{
        struct tegra_xudc_ep *ep0 = &xudc->ep[0];
        u16 maxpacket;
        u32 val;

        val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
        switch (val) {
        case PORTSC_PS_LS:
                xudc->gadget.speed = USB_SPEED_LOW;
                break;
        case PORTSC_PS_FS:
                xudc->gadget.speed = USB_SPEED_FULL;
                break;
        case PORTSC_PS_HS:
                xudc->gadget.speed = USB_SPEED_HIGH;
                break;
        case PORTSC_PS_SS:
                xudc->gadget.speed = USB_SPEED_SUPER;
                break;
        default:
                xudc->gadget.speed = USB_SPEED_UNKNOWN;
                break;
        }

        xudc->device_state = USB_STATE_DEFAULT;
        usb_gadget_set_state(&xudc->gadget, xudc->device_state);

        xudc->setup_state = WAIT_FOR_SETUP;

        if (xudc->gadget.speed == USB_SPEED_SUPER)
                maxpacket = 512;
        else
                maxpacket = 64;

        ep_ctx_write_max_packet_size(ep0->context, maxpacket);
        tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
        usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);

        if (!xudc->soc->u1_enable) {
                val = xudc_readl(xudc, PORTPM);
                val &= ~(PORTPM_U1TIMEOUT_MASK);
                xudc_writel(xudc, val, PORTPM);
        }

        if (!xudc->soc->u2_enable) {
                val = xudc_readl(xudc, PORTPM);
                val &= ~(PORTPM_U2TIMEOUT_MASK);
                xudc_writel(xudc, val, PORTPM);
        }

        if (xudc->gadget.speed <= USB_SPEED_HIGH) {
                val = xudc_readl(xudc, PORTPM);
                val &= ~(PORTPM_L1S_MASK);
                if (xudc->soc->lpm_enable)
                        val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
                else
                        val |= PORTPM_L1S(PORTPM_L1S_NYET);
                xudc_writel(xudc, val, PORTPM);
        }

        val = xudc_readl(xudc, ST);
        if (val & ST_RC)
                xudc_writel(xudc, ST_RC, ST);
}
static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
{
        tegra_xudc_reset(xudc);

        if (xudc->driver && xudc->driver->disconnect) {
                spin_unlock(&xudc->lock);
                xudc->driver->disconnect(&xudc->gadget);
                spin_lock(&xudc->lock);
        }

        xudc->device_state = USB_STATE_NOTATTACHED;
        usb_gadget_set_state(&xudc->gadget, xudc->device_state);

        complete(&xudc->disconnect_complete);
}
static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
{
        tegra_xudc_reset(xudc);

        if (xudc->driver) {
                spin_unlock(&xudc->lock);
                usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
                spin_lock(&xudc->lock);
        }

        tegra_xudc_port_connect(xudc);
}
static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
{
        dev_dbg(xudc->dev, "port suspend\n");

        xudc->resume_state = xudc->device_state;
        xudc->device_state = USB_STATE_SUSPENDED;
        usb_gadget_set_state(&xudc->gadget, xudc->device_state);

        if (xudc->driver->suspend) {
                spin_unlock(&xudc->lock);
                xudc->driver->suspend(&xudc->gadget);
                spin_lock(&xudc->lock);
        }
}
static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
{
        dev_dbg(xudc->dev, "port resume\n");

        tegra_xudc_resume_device_state(xudc);

        if (xudc->driver->resume) {
                spin_unlock(&xudc->lock);
                xudc->driver->resume(&xudc->gadget);
                spin_lock(&xudc->lock);
        }
}
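/*
 * PORTSC change bits are write-1-to-clear, so mask out all change bits
 * before setting the one being acknowledged; otherwise pending changes
 * would be cleared unintentionally.
 */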
static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
{
        u32 val;

        val = xudc_readl(xudc, PORTSC);
        val &= ~PORTSC_CHANGE_MASK;
        val |= flag;
        xudc_writel(xudc, val, PORTSC);
}
static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
        u32 portsc, porthalt;

        porthalt = xudc_readl(xudc, PORTHALT);
        if ((porthalt & PORTHALT_STCHG_REQ) &&
            (porthalt & PORTHALT_HALT_LTSSM)) {
                dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
                porthalt &= ~PORTHALT_HALT_LTSSM;
                xudc_writel(xudc, porthalt, PORTHALT);
        }

        portsc = xudc_readl(xudc, PORTSC);
        if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
                dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
                clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
#define TOGGLE_VBUS_WAIT_MS 100
                if (xudc->soc->port_reset_quirk) {
                        schedule_delayed_work(&xudc->port_reset_war_work,
                                msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
                        xudc->wait_for_sec_prc = 1;
                }
        }

        if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
                dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
                clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
                tegra_xudc_port_reset(xudc);
                cancel_delayed_work(&xudc->port_reset_war_work);
                xudc->wait_for_sec_prc = 0;
        }

        portsc = xudc_readl(xudc, PORTSC);
        if (portsc & PORTSC_WRC) {
                dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
                clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
                if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
                        tegra_xudc_port_reset(xudc);
        }

        portsc = xudc_readl(xudc, PORTSC);
        if (portsc & PORTSC_CSC) {
                dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
                clear_port_change(xudc, PORTSC_CSC);

                if (portsc & PORTSC_CCS)
                        tegra_xudc_port_connect(xudc);
                else
                        tegra_xudc_port_disconnect(xudc);

                if (xudc->wait_csc) {
                        cancel_delayed_work(&xudc->plc_reset_work);
                        xudc->wait_csc = false;
                }
        }

        portsc = xudc_readl(xudc, PORTSC);
        if (portsc & PORTSC_PLC) {
                u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;

                dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
                clear_port_change(xudc, PORTSC_PLC);
                switch (pls) {
                case PORTSC_PLS_U3:
                        tegra_xudc_port_suspend(xudc);
                        break;
                case PORTSC_PLS_U0:
                        if (xudc->gadget.speed < USB_SPEED_SUPER)
                                tegra_xudc_port_resume(xudc);
                        break;
                case PORTSC_PLS_RESUME:
                        if (xudc->gadget.speed == USB_SPEED_SUPER)
                                tegra_xudc_port_resume(xudc);
                        break;
                case PORTSC_PLS_INACTIVE:
                        schedule_delayed_work(&xudc->plc_reset_work,
                                msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
                        xudc->wait_csc = true;
                        break;
                default:
                        break;
                }
        }

        if (portsc & PORTSC_CEC) {
                dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
                clear_port_change(xudc, PORTSC_CEC);
        }

        dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
}
static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
{
        while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
               (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
                __tegra_xudc_handle_port_status(xudc);
}
static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
                                    struct tegra_xudc_trb *event)
{
        u32 type = trb_read_type(event);

        dump_trb(xudc, "EVENT", event);

        switch (type) {
        case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
                tegra_xudc_handle_port_status(xudc);
                break;
        case TRB_TYPE_TRANSFER_EVENT:
                tegra_xudc_handle_transfer_event(xudc, event);
                break;
        case TRB_TYPE_SETUP_PACKET_EVENT:
                tegra_xudc_handle_ep0_event(xudc, event);
                break;
        default:
                dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
                break;
        }
}
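/*
 * Consume events until the cycle bit of the current entry no longer matches
 * the consumer cycle state (CCS). The producer toggles the cycle bit each
 * time it wraps, so a mismatch marks the first entry not yet written.
 */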
static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
{
        struct tegra_xudc_trb *event;
        dma_addr_t erdp;

        while (true) {
                event = xudc->event_ring[xudc->event_ring_index] +
                        xudc->event_ring_deq_ptr;

                if (trb_read_cycle(event) != xudc->ccs)
                        break;

                tegra_xudc_handle_event(xudc, event);

                xudc->event_ring_deq_ptr++;
                if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
                        xudc->event_ring_deq_ptr = 0;
                        xudc->event_ring_index++;
                }

                if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
                        xudc->event_ring_index = 0;
                        xudc->ccs = !xudc->ccs;
                }
        }

        erdp = xudc->event_ring_phys[xudc->event_ring_index] +
                xudc->event_ring_deq_ptr * sizeof(*event);

        xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
        xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
}
static irqreturn_t tegra_xudc_irq(int irq, void *data)
{
        struct tegra_xudc *xudc = data;
        unsigned long flags;
        u32 val;

        val = xudc_readl(xudc, ST);
        if (!(val & ST_IP))
                return IRQ_NONE;
        xudc_writel(xudc, ST_IP, ST);

        spin_lock_irqsave(&xudc->lock, flags);
        tegra_xudc_process_event_ring(xudc);
        spin_unlock_irqrestore(&xudc->lock, flags);

        return IRQ_HANDLED;
}
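/*
 * Per-endpoint setup: every endpoint except the unused EP1 gets a transfer
 * ring from the DMA pool; non-control endpoints are registered on the
 * gadget's ep_list, while EP0 is reported separately via gadget.ep0.
 */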
static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
{
        struct tegra_xudc_ep *ep = &xudc->ep[index];

        ep->xudc = xudc;
        ep->index = index;
        ep->context = &xudc->ep_context[index];
        INIT_LIST_HEAD(&ep->queue);

        /*
         * EP1 would be the input endpoint corresponding to EP0, but since
         * EP0 is bi-directional, EP1 is unused.
         */
        if (index == 1)
                return 0;

        ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
                                           GFP_KERNEL,
                                           &ep->transfer_ring_phys);
        if (!ep->transfer_ring)
                return -ENOMEM;

        if (index) {
                snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
                         (index % 2 == 0) ? "out" : "in");
                ep->usb_ep.name = ep->name;
                usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
                ep->usb_ep.max_streams = 16;
                ep->usb_ep.ops = &tegra_xudc_ep_ops;
                ep->usb_ep.caps.type_bulk = true;
                ep->usb_ep.caps.type_int = true;
                if (index & 1)
                        ep->usb_ep.caps.dir_in = true;
                else
                        ep->usb_ep.caps.dir_out = true;
                list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
        } else {
                strscpy(ep->name, "ep0", sizeof(ep->name));
                ep->usb_ep.name = ep->name;
                usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
                ep->usb_ep.ops = &tegra_xudc_ep0_ops;
                ep->usb_ep.caps.type_control = true;
                ep->usb_ep.caps.dir_in = true;
                ep->usb_ep.caps.dir_out = true;
        }

        return 0;
}
static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
{
        struct tegra_xudc_ep *ep = &xudc->ep[index];

        /*
         * EP1 would be the input endpoint corresponding to EP0, but since
         * EP0 is bi-directional, EP1 is unused.
         */
        if (index == 1)
                return;

        dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
                      ep->transfer_ring_phys);
}
static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
{
        struct usb_request *req;
        unsigned int i;
        int err;

        xudc->ep_context =
                dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
                                   sizeof(*xudc->ep_context),
                                   &xudc->ep_context_phys, GFP_KERNEL);
        if (!xudc->ep_context)
                return -ENOMEM;

        xudc->transfer_ring_pool =
                dmam_pool_create(dev_name(xudc->dev), xudc->dev,
                                 XUDC_TRANSFER_RING_SIZE *
                                 sizeof(struct tegra_xudc_trb),
                                 sizeof(struct tegra_xudc_trb), 0);
        if (!xudc->transfer_ring_pool) {
                err = -ENOMEM;
                goto free_ep_context;
        }

        INIT_LIST_HEAD(&xudc->gadget.ep_list);
        for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
                err = tegra_xudc_alloc_ep(xudc, i);
                if (err < 0)
                        goto free_eps;
        }

        req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto free_eps;
        }
        xudc->ep0_req = to_xudc_req(req);

        return 0;

free_eps:
        for (; i > 0; i--)
                tegra_xudc_free_ep(xudc, i - 1);
free_ep_context:
        dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
                          xudc->ep_context, xudc->ep_context_phys);
        return err;
}
static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
{
        xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
        xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
}
static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
{
        unsigned int i;

        tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
                                   &xudc->ep0_req->usb_req);

        for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
                tegra_xudc_free_ep(xudc, i);

        dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
                          xudc->ep_context, xudc->ep_context_phys);
}
static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
                xudc->event_ring[i] =
                        dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
                                           sizeof(*xudc->event_ring[i]),
                                           &xudc->event_ring_phys[i],
                                           GFP_KERNEL);
                if (!xudc->event_ring[i])
                        goto free_dma;
        }

        return 0;

free_dma:
        for (; i > 0; i--) {
                dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
                                  sizeof(*xudc->event_ring[i - 1]),
                                  xudc->event_ring[i - 1],
                                  xudc->event_ring_phys[i - 1]);
        }

        return -ENOMEM;
}
static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
{
        unsigned int i;
        u32 val;

        for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
                memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
                       sizeof(*xudc->event_ring[i]));

                val = xudc_readl(xudc, ERSTSZ);
                val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
                val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
                xudc_writel(xudc, val, ERSTSZ);

                xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
                            ERSTXBALO(i));
                xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
                            ERSTXBAHI(i));
        }

        val = lower_32_bits(xudc->event_ring_phys[0]);
        xudc_writel(xudc, val, ERDPLO);
        val |= EREPLO_ECS;
        xudc_writel(xudc, val, EREPLO);

        val = upper_32_bits(xudc->event_ring_phys[0]);
        xudc_writel(xudc, val, ERDPHI);
        xudc_writel(xudc, val, EREPHI);

        xudc->ccs = true;
        xudc->event_ring_index = 0;
        xudc->event_ring_deq_ptr = 0;
}
static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
                dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
                                  sizeof(*xudc->event_ring[i]),
                                  xudc->event_ring[i],
                                  xudc->event_ring_phys[i]);
        }
}
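/*
 * The device controller sits behind an FPCI bridge (plus an additional IPFS
 * wrapper on Tegra210): enable bus mastering and point BAR0 at the
 * controller's MMIO aperture before touching any XUSB_DEV registers.
 */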
static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
{
        u32 val;

        if (xudc->soc->has_ipfs) {
                val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
                val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
                ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
                usleep_range(10, 15);
        }

        /* Enable bus master */
        val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
              XUSB_DEV_CFG_1_BUS_MASTER_EN;
        fpci_writel(xudc, val, XUSB_DEV_CFG_1);

        /* Program BAR0 space */
        val = fpci_readl(xudc, XUSB_DEV_CFG_4);
        val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
        val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);

        fpci_writel(xudc, val, XUSB_DEV_CFG_4);
        fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);

        usleep_range(100, 200);

        if (xudc->soc->has_ipfs) {
                /* Enable interrupt assertion */
                val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
                val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
                ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
        }
}
static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
{
        u32 val, imod;

        if (xudc->soc->has_ipfs) {
                val = xudc_readl(xudc, BLCG);
                val |= BLCG_ALL;
                val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
                         BLCG_COREPLL_PWRDN);
                val |= BLCG_IOPLL_0_PWRDN;
                val |= BLCG_IOPLL_1_PWRDN;
                val |= BLCG_IOPLL_2_PWRDN;

                xudc_writel(xudc, val, BLCG);
        }

        if (xudc->soc->port_speed_quirk)
                tegra_xudc_limit_port_speed(xudc);

        /* Set a reasonable U3 exit timer value. */
        val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
        val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
        val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
        xudc_writel(xudc, val, SSPX_CORE_PADCTL4);

        /* Default ping LFPS tBurst is too large. */
        val = xudc_readl(xudc, SSPX_CORE_CNT0);
        val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
        val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
        xudc_writel(xudc, val, SSPX_CORE_CNT0);

        /* Default tPortConfiguration timeout is too small. */
        val = xudc_readl(xudc, SSPX_CORE_CNT30);
        val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
        val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
        xudc_writel(xudc, val, SSPX_CORE_CNT30);

        if (xudc->soc->lpm_enable) {
                /* Set L1 resume duration to 95 us. */
                val = xudc_readl(xudc, HSFSPI_COUNT13);
                val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
                val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
                xudc_writel(xudc, val, HSFSPI_COUNT13);
        }

        /*
         * Compliance suite appears to be violating polling LFPS tBurst max
         * of 1.4us. Send 1.45us instead.
         */
        val = xudc_readl(xudc, SSPX_CORE_CNT32);
        val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
        val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
        xudc_writel(xudc, val, SSPX_CORE_CNT32);

        /* Direct HS/FS port instance to RxDetect. */
        val = xudc_readl(xudc, CFG_DEV_FE);
        val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
        val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
        xudc_writel(xudc, val, CFG_DEV_FE);

        val = xudc_readl(xudc, PORTSC);
        val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
        val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
        xudc_writel(xudc, val, PORTSC);

        /* Direct SS port instance to RxDetect. */
        val = xudc_readl(xudc, CFG_DEV_FE);
        val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
        val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_SS_PI);
        xudc_writel(xudc, val, CFG_DEV_FE);

        val = xudc_readl(xudc, PORTSC);
        val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
        val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
        xudc_writel(xudc, val, PORTSC);

        /* Restore port instance. */
        val = xudc_readl(xudc, CFG_DEV_FE);
        val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
        xudc_writel(xudc, val, CFG_DEV_FE);

        /*
         * Enable INFINITE_SS_RETRY to prevent device from entering
         * Disabled.Error when attached to buggy SuperSpeed hubs.
         */
        val = xudc_readl(xudc, CFG_DEV_FE);
        val |= CFG_DEV_FE_INFINITE_SS_RETRY;
        xudc_writel(xudc, val, CFG_DEV_FE);

        /* Set interrupt moderation. */
        imod = XUDC_INTERRUPT_MODERATION_US * 4;
        val = xudc_readl(xudc, RT_IMOD);
        val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
        val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
        xudc_writel(xudc, val, RT_IMOD);

        /* Increase SSPI transaction timeout from 32us to 512us. */
        val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
        val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
        val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
        xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
}
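/*
 * PHY lookup is per UTMI port: the usb2 PHY is optional, the legacy usb-phy
 * (used for VBUS notification) is taken from the UTMI PHY's DT node, and a
 * usb3 PHY is looked up only for ports that have a SuperSpeed companion as
 * reported by the pad controller.
 */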
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
        int err = 0, usb3;
        unsigned int i;

        xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
                                      sizeof(*xudc->utmi_phy), GFP_KERNEL);
        if (!xudc->utmi_phy)
                return -ENOMEM;

        xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
                                      sizeof(*xudc->usb3_phy), GFP_KERNEL);
        if (!xudc->usb3_phy)
                return -ENOMEM;

        xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
                                    sizeof(*xudc->usbphy), GFP_KERNEL);
        if (!xudc->usbphy)
                return -ENOMEM;

        xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;

        for (i = 0; i < xudc->soc->num_phys; i++) {
                char phy_name[] = "usb.-.";

                /* Get USB2 phy */
                snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
                xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
                if (IS_ERR(xudc->utmi_phy[i])) {
                        err = PTR_ERR(xudc->utmi_phy[i]);
                        dev_err_probe(xudc->dev, err,
                                      "failed to get usb2-%d PHY\n", i);
                        goto clean_up;
                } else if (xudc->utmi_phy[i]) {
                        /* Get usb-phy, if utmi phy is available */
                        xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
                                                xudc->utmi_phy[i]->dev.of_node,
                                                &xudc->vbus_nb);
                        if (IS_ERR(xudc->usbphy[i])) {
                                err = PTR_ERR(xudc->usbphy[i]);
                                dev_err_probe(xudc->dev, err,
                                              "failed to get usbphy-%d\n", i);
                                goto clean_up;
                        }
                } else if (!xudc->utmi_phy[i]) {
                        /* if utmi phy is not available, ignore USB3 phy get */
                        continue;
                }

                /* Get USB3 phy */
                usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
                if (usb3 < 0)
                        continue;

                snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
                xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
                if (IS_ERR(xudc->usb3_phy[i])) {
                        err = PTR_ERR(xudc->usb3_phy[i]);
                        dev_err_probe(xudc->dev, err,
                                      "failed to get usb3-%d PHY\n", usb3);
                        goto clean_up;
                } else if (xudc->usb3_phy[i])
                        dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
        }

        return err;

clean_up:
        for (i = 0; i < xudc->soc->num_phys; i++) {
                xudc->usb3_phy[i] = NULL;
                xudc->utmi_phy[i] = NULL;
                xudc->usbphy[i] = NULL;
        }

        return err;
}
static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
{
        unsigned int i;

        for (i = 0; i < xudc->soc->num_phys; i++) {
                phy_exit(xudc->usb3_phy[i]);
                phy_exit(xudc->utmi_phy[i]);
        }
}
static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
{
        int err;
        unsigned int i;

        for (i = 0; i < xudc->soc->num_phys; i++) {
                err = phy_init(xudc->utmi_phy[i]);
                if (err < 0) {
                        dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err);
                        goto exit_phy;
                }

                err = phy_init(xudc->usb3_phy[i]);
                if (err < 0) {
                        dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err);
                        goto exit_phy;
                }
        }

        return 0;

exit_phy:
        tegra_xudc_phy_exit(xudc);
        return err;
}
static const char * const tegra210_xudc_supply_names[] = {
        "hvdd-usb",
        "avddio-usb",
};

static const char * const tegra210_xudc_clock_names[] = {
        "dev",
        "ss",
        "ss_src",
        "hs_src",
        "fs_src",
};

static const char * const tegra186_xudc_clock_names[] = {
        "dev",
        "ss",
        "ss_src",
        "fs_src",
};
static struct tegra_xudc_soc tegra210_xudc_soc_data = {
        .supply_names = tegra210_xudc_supply_names,
        .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
        .clock_names = tegra210_xudc_clock_names,
        .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
        .num_phys = 4,
        .u1_enable = false,
        .u2_enable = true,
        .lpm_enable = false,
        .invalid_seq_num = true,
        .pls_quirk = true,
        .port_reset_quirk = true,
        .port_speed_quirk = false,
        .has_ipfs = true,
};
static struct tegra_xudc_soc tegra186_xudc_soc_data = {
        .clock_names = tegra186_xudc_clock_names,
        .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
        .num_phys = 4,
        .u1_enable = true,
        .u2_enable = true,
        .lpm_enable = false,
        .invalid_seq_num = false,
        .pls_quirk = false,
        .port_reset_quirk = false,
        .port_speed_quirk = false,
        .has_ipfs = false,
};
static struct tegra_xudc_soc tegra194_xudc_soc_data = {
        .clock_names = tegra186_xudc_clock_names,
        .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
        .num_phys = 4,
        .u1_enable = true,
        .u2_enable = true,
        .lpm_enable = true,
        .invalid_seq_num = false,
        .pls_quirk = false,
        .port_reset_quirk = false,
        .port_speed_quirk = true,
        .has_ipfs = false,
};
static const struct of_device_id tegra_xudc_of_match[] = {
        {
                .compatible = "nvidia,tegra210-xudc",
                .data = &tegra210_xudc_soc_data
        },
        {
                .compatible = "nvidia,tegra186-xudc",
                .data = &tegra186_xudc_soc_data
        },
        {
                .compatible = "nvidia,tegra194-xudc",
                .data = &tegra194_xudc_soc_data
        },
        { }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
{
        if (xudc->genpd_dl_ss)
                device_link_del(xudc->genpd_dl_ss);

        if (xudc->genpd_dl_device)
                device_link_del(xudc->genpd_dl_device);

        if (xudc->genpd_dev_ss)
                dev_pm_domain_detach(xudc->genpd_dev_ss, true);

        if (xudc->genpd_dev_device)
                dev_pm_domain_detach(xudc->genpd_dev_device, true);
}
static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
{
        struct device *dev = xudc->dev;
        int err;

        xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
        if (IS_ERR(xudc->genpd_dev_device)) {
                err = PTR_ERR(xudc->genpd_dev_device);
                dev_err(dev, "failed to get device power domain: %d\n", err);
                return err;
        }

        xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
        if (IS_ERR(xudc->genpd_dev_ss)) {
                err = PTR_ERR(xudc->genpd_dev_ss);
                dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
                return err;
        }

        xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
                                                DL_FLAG_PM_RUNTIME |
                                                DL_FLAG_STATELESS);
        if (!xudc->genpd_dl_device) {
                dev_err(dev, "failed to add USB device link\n");
                return -ENODEV;
        }

        xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
                                            DL_FLAG_PM_RUNTIME |
                                            DL_FLAG_STATELESS);
        if (!xudc->genpd_dl_ss) {
                dev_err(dev, "failed to add SuperSpeed device link\n");
                return -ENODEV;
        }

        return 0;
}
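/*
 * Probe order: MMIO and IRQ resources, clocks and regulators, pad
 * controller and PHYs, power domains, then the event ring, endpoints and
 * finally gadget registration. The error path below unwinds in reverse.
 */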
static int tegra_xudc_probe(struct platform_device *pdev)
{
        struct tegra_xudc *xudc;
        struct resource *res;
        unsigned int i;
        int err;

        xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
        if (!xudc)
                return -ENOMEM;

        xudc->dev = &pdev->dev;
        platform_set_drvdata(pdev, xudc);

        xudc->soc = of_device_get_match_data(&pdev->dev);
        if (!xudc->soc)
                return -ENODEV;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
        xudc->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xudc->base))
                return PTR_ERR(xudc->base);
        xudc->phys_base = res->start;

        xudc->fpci = devm_platform_ioremap_resource_byname(pdev, "fpci");
        if (IS_ERR(xudc->fpci))
                return PTR_ERR(xudc->fpci);

        if (xudc->soc->has_ipfs) {
                xudc->ipfs = devm_platform_ioremap_resource_byname(pdev, "ipfs");
                if (IS_ERR(xudc->ipfs))
                        return PTR_ERR(xudc->ipfs);
        }

        xudc->irq = platform_get_irq(pdev, 0);
        if (xudc->irq < 0)
                return xudc->irq;

        err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
                               dev_name(&pdev->dev), xudc);
        if (err < 0) {
                dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
                        err);
                return err;
        }

        xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, sizeof(*xudc->clks),
                                  GFP_KERNEL);
        if (!xudc->clks)
                return -ENOMEM;

        for (i = 0; i < xudc->soc->num_clks; i++)
                xudc->clks[i].id = xudc->soc->clock_names[i];

        err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
        if (err) {
                dev_err_probe(xudc->dev, err, "failed to request clocks\n");
                return err;
        }

        xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
                                      sizeof(*xudc->supplies), GFP_KERNEL);
        if (!xudc->supplies)
                return -ENOMEM;

        for (i = 0; i < xudc->soc->num_supplies; i++)
                xudc->supplies[i].supply = xudc->soc->supply_names[i];

        err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
                                      xudc->supplies);
        if (err) {
                dev_err_probe(xudc->dev, err, "failed to request regulators\n");
                return err;
        }

        xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
        if (IS_ERR(xudc->padctl))
                return PTR_ERR(xudc->padctl);

        err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
        if (err) {
                dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
                goto put_padctl;
        }

        err = tegra_xudc_phy_get(xudc);
        if (err)
                goto disable_regulator;

        err = tegra_xudc_powerdomain_init(xudc);
        if (err)
                goto put_powerdomains;

        err = tegra_xudc_phy_init(xudc);
        if (err)
                goto put_powerdomains;

        err = tegra_xudc_alloc_event_ring(xudc);
        if (err)
                goto disable_phy;

        err = tegra_xudc_alloc_eps(xudc);
        if (err < 0)
                goto free_event_ring;

        spin_lock_init(&xudc->lock);

        init_completion(&xudc->disconnect_complete);

        INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);

        INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);

        INIT_DELAYED_WORK(&xudc->port_reset_war_work,
                          tegra_xudc_port_reset_war_work);

        pm_runtime_enable(&pdev->dev);

        xudc->gadget.ops = &tegra_xudc_gadget_ops;
        xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
        xudc->gadget.name = "tegra-xudc";
        xudc->gadget.max_speed = USB_SPEED_SUPER;

        err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
        if (err) {
                dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
                goto free_eps;
        }

        return 0;

free_eps:
        pm_runtime_disable(&pdev->dev);
        tegra_xudc_free_eps(xudc);
free_event_ring:
        tegra_xudc_free_event_ring(xudc);
disable_phy:
        tegra_xudc_phy_exit(xudc);
put_powerdomains:
        tegra_xudc_powerdomain_remove(xudc);
disable_regulator:
        regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
put_padctl:
        tegra_xusb_padctl_put(xudc->padctl);

        return err;
}
static int tegra_xudc_remove(struct platform_device *pdev)
{
        struct tegra_xudc *xudc = platform_get_drvdata(pdev);
        unsigned int i;

        pm_runtime_get_sync(xudc->dev);

        cancel_delayed_work_sync(&xudc->plc_reset_work);
        cancel_work_sync(&xudc->usb_role_sw_work);

        usb_del_gadget_udc(&xudc->gadget);

        tegra_xudc_free_eps(xudc);
        tegra_xudc_free_event_ring(xudc);

        tegra_xudc_powerdomain_remove(xudc);

        regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

        for (i = 0; i < xudc->soc->num_phys; i++) {
                phy_power_off(xudc->utmi_phy[i]);
                phy_power_off(xudc->usb3_phy[i]);
        }

        tegra_xudc_phy_exit(xudc);

        pm_runtime_disable(xudc->dev);
        pm_runtime_put(xudc->dev);

        tegra_xusb_padctl_put(xudc->padctl);

        return 0;
}
static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
{
        unsigned long flags;

        dev_dbg(xudc->dev, "entering ELPG\n");

        spin_lock_irqsave(&xudc->lock, flags);

        xudc->powergated = true;
        xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
        xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
        xudc_writel(xudc, 0, CTRL);

        spin_unlock_irqrestore(&xudc->lock, flags);

        clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);

        regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);

        dev_dbg(xudc->dev, "entering ELPG done\n");
        return 0;
}
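/*
 * Exiting ELPG loses controller register state, so the controller is fully
 * re-initialized after power and clocks are restored, and the saved CTRL
 * and PORTPM values are written back last.
 */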
static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
{
        unsigned long flags;
        int err;

        dev_dbg(xudc->dev, "exiting ELPG\n");

        err = regulator_bulk_enable(xudc->soc->num_supplies,
                                    xudc->supplies);
        if (err < 0)
                return err;

        err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
        if (err < 0)
                return err;

        tegra_xudc_fpci_ipfs_init(xudc);

        tegra_xudc_device_params_init(xudc);

        tegra_xudc_init_event_ring(xudc);

        tegra_xudc_init_eps(xudc);

        xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
        xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);

        spin_lock_irqsave(&xudc->lock, flags);
        xudc->powergated = false;
        spin_unlock_irqrestore(&xudc->lock, flags);

        dev_dbg(xudc->dev, "exiting ELPG done\n");
        return 0;
}
static int __maybe_unused tegra_xudc_suspend(struct device *dev)
{
        struct tegra_xudc *xudc = dev_get_drvdata(dev);
        unsigned long flags;

        spin_lock_irqsave(&xudc->lock, flags);
        xudc->suspended = true;
        spin_unlock_irqrestore(&xudc->lock, flags);

        flush_work(&xudc->usb_role_sw_work);

        if (!pm_runtime_status_suspended(dev)) {
                /* Forcibly disconnect before powergating. */
                tegra_xudc_device_mode_off(xudc);
                tegra_xudc_powergate(xudc);
        }

        pm_runtime_disable(dev);

        return 0;
}
static int __maybe_unused tegra_xudc_resume(struct device *dev)
{
        struct tegra_xudc *xudc = dev_get_drvdata(dev);
        unsigned long flags;
        int err;

        err = tegra_xudc_unpowergate(xudc);
        if (err < 0)
                return err;

        spin_lock_irqsave(&xudc->lock, flags);
        xudc->suspended = false;
        spin_unlock_irqrestore(&xudc->lock, flags);

        schedule_work(&xudc->usb_role_sw_work);

        pm_runtime_enable(dev);

        return 0;
}
static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
{
        struct tegra_xudc *xudc = dev_get_drvdata(dev);

        return tegra_xudc_powergate(xudc);
}

static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
{
        struct tegra_xudc *xudc = dev_get_drvdata(dev);

        return tegra_xudc_unpowergate(xudc);
}
static const struct dev_pm_ops tegra_xudc_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
        SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
                           tegra_xudc_runtime_resume, NULL)
};
static struct platform_driver tegra_xudc_driver = {
        .probe = tegra_xudc_probe,
        .remove = tegra_xudc_remove,
        .driver = {
                .name = "tegra-xudc",
                .pm = &tegra_xudc_pm_ops,
                .of_match_table = tegra_xudc_of_match,
        },
};
module_platform_driver(tegra_xudc_driver);
MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
MODULE_LICENSE("GPL v2");