GNU Linux-libre 5.19-rc6-gnu
[releases.git] / drivers / net / wireless / realtek / rtw89 / ser.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4
5 #include <linux/devcoredump.h>
6
7 #include "cam.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "ps.h"
12 #include "reg.h"
13 #include "ser.h"
14 #include "util.h"
15
16 #define SER_RECFG_TIMEOUT 1000
17
/* Events driving the SER (system error recovery) state machine.
 * M1/M3/M5 are the firmware handshake milestones of L1 recovery
 * (see rtw89_ser_notify() for the error-code to event mapping).
 */
enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,	/* synthesized on entry to a new state */
	SER_EV_STATE_OUT,	/* synthesized on exit from the current state */
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M3_TIMEOUT,	/* FW did not answer M2 in time */
	SER_EV_FW_M5_TIMEOUT,	/* FW did not answer M4 in time */
	SER_EV_L0_RESET,
	SER_EV_MAXX	/* sentinel; also bounds ev_tbl lookups */
};
33
/* States of the SER state machine; each maps to a handler in ser_st_tbl. */
enum ser_state {
	SER_IDLE_ST,		/* no recovery in progress */
	SER_RESET_TRX_ST,	/* L1: stop TX/RX, reset HCI, wait for M3 */
	SER_DO_HCI_ST,		/* L1: wait for FW MAC reset done (M5) */
	SER_L2_RESET_ST,	/* full restart via ieee80211_restart_hw() */
	SER_ST_MAX_ST	/* sentinel; also bounds st_tbl lookups */
};
41
/* One queued SER event; allocated in ser_send_msg(), freed by the
 * handler work after dispatch.
 */
struct ser_msg {
	struct list_head list;	/* link on rtw89_ser::msg_q */
	u8 event;		/* enum ser_evt value */
};
46
/* State-table entry: state id, printable name, and its event handler. */
struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};
52
/* Event-table entry: event id and printable name (debug logging only). */
struct event_ent {
	u8 event;
	char *name;
};
57
58 static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
59 {
60         if (event < SER_EV_MAXX)
61                 return ser->ev_tbl[event].name;
62
63         return "err_ev_name";
64 }
65
66 static char *ser_st_name(struct rtw89_ser *ser)
67 {
68         if (ser->state < SER_ST_MAX_ST)
69                 return ser->st_tbl[ser->state].name;
70
71         return "err_st_name";
72 }
73
/* Define one core-dump section: a packed struct ser_cd_<_name> holding a
 * type tag, the payload size, a fixed marker pattern (padding) and _size
 * bytes of payload, plus an init helper stamping the header fields.
 */
#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}
87
/* Section type tags stored in the core-dump headers. */
enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE	= 0,
	RTW89_SER_CD_FW_BACKTRACE	= 1,
};
92
/* Instantiate the two dump-section types carried in a core dump. */
RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

/* Full core-dump image: FW reserved-PLE memory followed by FW backtrace. */
struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;
105
106 static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
107 {
108         struct rtw89_ser_cd_buffer *buf;
109
110         buf = vzalloc(sizeof(*buf));
111         if (!buf)
112                 return NULL;
113
114         ser_cd_fw_rsvd_ple_init(&buf->fwple);
115         ser_cd_fw_backtrace_init(&buf->fwbt);
116
117         return buf;
118 }
119
/* Hand the finished core dump to the device coredump framework. */
static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredump, buf's lifetime is supposed to be
	 * handled by the device coredump framework. Note that a new dump
	 * will be discarded if a previous one hasn't been released by
	 * framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}
132
133 static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
134                               struct rtw89_ser_cd_buffer *buf, bool free_self)
135 {
136         if (!free_self)
137                 return;
138
139         rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");
140
141         /* When some problems happen during filling data of core dump,
142          * we won't send it to device coredump framework. Instead, we
143          * free buf by ourselves.
144          */
145         vfree(buf);
146 }
147
/* Dispatch @evt to the current state's handler, leaving LPS first so the
 * chip is awake for any register access the handler performs.
 */
static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	rtw89_leave_lps(rtwdev);
	ser->st_tbl[ser->state].st_func(ser, evt);
}
158
/* Transition to @new_state: deliver STATE_OUT to the old state, switch,
 * then deliver STATE_IN to the new one. Self-transitions and out-of-range
 * states are ignored. The OUT-before-IN ordering is what lets handlers
 * do teardown/setup in their STATE_OUT/STATE_IN cases.
 */
static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}
173
174 static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
175 {
176         struct ser_msg *msg;
177
178         spin_lock_irq(&ser->msg_q_lock);
179         msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
180         if (msg)
181                 list_del(&msg->list);
182         spin_unlock_irq(&ser->msg_q_lock);
183
184         return msg;
185 }
186
187 static void rtw89_ser_hdl_work(struct work_struct *work)
188 {
189         struct ser_msg *msg;
190         struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
191                                              ser_hdl_work);
192
193         while ((msg = __rtw89_ser_dequeue_msg(ser))) {
194                 ser_state_run(ser, msg->event);
195                 kfree(msg);
196         }
197 }
198
199 static int ser_send_msg(struct rtw89_ser *ser, u8 event)
200 {
201         struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
202         struct ser_msg *msg = NULL;
203
204         if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
205                 return -EIO;
206
207         msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
208         if (!msg)
209                 return -ENOMEM;
210
211         msg->event = event;
212
213         spin_lock_irq(&ser->msg_q_lock);
214         list_add(&msg->list, &ser->msg_q);
215         spin_unlock_irq(&ser->msg_q_lock);
216
217         ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
218         return 0;
219 }
220
/* Delayed work: the timeout armed by ser_set_alarm() expired — deliver
 * the pending alarm event and reset it to SER_EV_NONE.
 */
static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}
229
/* Arm a one-shot timeout: after @ms milliseconds, @event is sent to the
 * state machine (via rtw89_ser_alarm_work) unless ser_del_alarm() runs
 * first. Ignored while SER is shutting down.
 */
static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}
241
/* Cancel a pending alarm and clear the stashed event. */
static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}
247
248 /* driver function */
249 static void drv_stop_tx(struct rtw89_ser *ser)
250 {
251         struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
252
253         ieee80211_stop_queues(rtwdev->hw);
254         set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
255 }
256
/* Stop RX processing by clearing RTW89_FLAG_RUNNING (presumably checked
 * by the RX path — confirm against the HCI code) and record the stop so
 * drv_resume_rx() knows to undo it.
 */
static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}
264
/* Reset the host-controller interface (TX/RX rings etc. — HCI specific). */
static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}
271
/* Re-enable mac80211 TX queues, but only if drv_stop_tx() stopped them. */
static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}
282
/* Re-enable RX processing, but only if drv_stop_rx() stopped it. */
static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}
293
/* Return a vif to its post-probe state: release its HW port slot and
 * clear link/trigger state, ahead of the full restart in L2 reset.
 */
static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
}
300
/* Station iterator callback: tear down one station's address CAM entry.
 * @data carries the rtw89_dev (see ser_deinit_cam()).
 */
static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
}
308
/* Tear down CAM state for a vif. In AP mode each connected station owns
 * an address CAM entry, so those are deinitialized first.
 */
static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		ieee80211_iterate_stations_atomic(rtwdev->hw,
						  ser_sta_deinit_addr_cam_iter,
						  rtwdev);

	rtw89_cam_deinit(rtwdev, rtwvif);
}
318
/* Drop all MAC-side bindings before an L2 restart: security keys, per-vif
 * CAM entries, the mac_id allocation map, and per-vif port/link state.
 */
static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);
}
331
332 /* hal function */
333 static int hal_enable_dma(struct rtw89_ser *ser)
334 {
335         struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
336         int ret;
337
338         if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
339                 return 0;
340
341         if (!rtwdev->hci.ops->mac_lv1_rcvy)
342                 return -EIO;
343
344         ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
345         if (!ret)
346                 clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
347
348         return ret;
349 }
350
/* Run L1-recovery step 1 through the HCI ops to stop DMA, marking the
 * stop on success so hal_enable_dma() can undo it. -EIO if the HCI has
 * no L1 recovery hook.
 */
static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}
365
/* Send M2 (L1 disable) to the firmware; FW is expected to answer with M3. */
static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}
372
/* Send M4 (L1 recovery) to the firmware; FW is expected to answer with M5. */
static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}
379
380 /* state handler */
381 static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
382 {
383         struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
384
385         switch (evt) {
386         case SER_EV_STATE_IN:
387                 rtw89_hci_recovery_complete(rtwdev);
388                 break;
389         case SER_EV_L1_RESET:
390                 ser_state_goto(ser, SER_RESET_TRX_ST);
391                 break;
392         case SER_EV_L2_RESET:
393                 ser_state_goto(ser, SER_L2_RESET_ST);
394                 break;
395         case SER_EV_STATE_OUT:
396                 rtw89_hci_recovery_start(rtwdev);
397                 break;
398         default:
399                 break;
400         }
401 }
402
/* SER_RESET_TRX_ST: first half of L1 recovery. On entry: stop TX, stop
 * DMA (escalating to L2 on failure), stop RX, reset HCI, then send M2 and
 * arm a 1s timeout waiting for the FW's M3. M3 advances to DO_HCI; a
 * timeout escalates to L2 reset. On exit everything is undone in reverse
 * order (alarm, DMA, RX, TX).
 */
static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* set alarm to prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		break;

	default:
		break;
	}
}
443
/* SER_DO_HCI_ST: second half of L1 recovery. On entry: send M4 and arm a
 * 1s timeout waiting for the FW's M5 (MAC reset done). M5 returns to
 * idle; a timeout escalates to L2 reset. The alarm is cancelled on exit.
 */
static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}
471
/* Copy @len bytes of MAC memory region @sel (starting at @start_addr
 * within the region) into @buf, via the indirect-access window: program
 * R_AX_FILTER_MODEL_ADDR with the page base, then read the page out of
 * the R_AX_INDIR_ACCESS_ENTRY aperture one u32 at a time. @residue
 * skips into the first page only; subsequent pages start at offset 0.
 * Assumes @buf is u32-aligned and @start_addr/@len are multiples of 4 —
 * TODO confirm against callers.
 */
static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = rtw89_mac_mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);

		for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
		     i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}
501
/* Dump the FW reserved payload-engine area (chip-specific offset into
 * shared buffer memory) into @buf; @buf must hold RTW89_FW_RSVD_PLE_SIZE
 * bytes.
 */
static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}
512
/* Header the FW places at the start of the reserved-PLE dump, describing
 * where its backtrace lives: WCPU-view address, byte size, and a magic
 * key used to validate the record.
 */
struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

/* One backtrace frame as laid out by the FW: return address + stack ptr. */
struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

/* The dump loop strides by RTW89_FW_BACKTRACE_INFO_SIZE; keep it in sync
 * with the struct layout.
 */
static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));
526
/* Read the FW backtrace described by @ent into @buf via the indirect-
 * access window. The entry is validated first: the WCPU address must be
 * nonzero after rebasing, the key must match RTW89_FW_BACKTRACE_KEY, and
 * the size must be nonzero, frame-aligned and within the buffer bound.
 *
 * Returns 0 on success, -EINVAL when the entry fails validation (the
 * caller then skips sending the core dump).
 */
static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);

	/* Each frame is two u32 reads (ra at i, sp at i + 4). */
	for (i = R_AX_INDIR_ACCESS_ENTRY;
	     i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}
573
/* Preparation for L2 reset (called with rtwdev->mutex held): capture a
 * core dump (reserved-PLE memory, whose first bytes double as the
 * backtrace descriptor, plus the FW backtrace), hand it to the coredump
 * framework on success or free it ourselves on failure, then tear down
 * MAC bindings and stop the core ahead of ieee80211_restart_hw().
 */
static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	/* The backtrace descriptor lives at the start of the PLE dump. */
	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	/* On any failure we still own buf (possibly NULL; vfree handles it). */
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
603
/* SER_L2_RESET_ST: full restart. On entry: dump state and tear down the
 * core under the device mutex, ask mac80211 to restart the HW, and arm a
 * reconfiguration timeout. RECFG_DONE (or the timeout, after logging)
 * returns to idle and clears the restart-trigger flag.
 */
static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}
634
/* Event names for debug logging; indexed by enum ser_evt. */
static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};
650
/* State table; indexed by enum ser_state (order must match). */
static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};
657
/* Initialize the SER context: zero the struct, hook up the state/event
 * tables, start in SER_IDLE_ST, and set up the queue lock plus the
 * handler and alarm work items. Always returns 0.
 */
int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}
674
675 int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
676 {
677         struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;
678
679         set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
680         cancel_delayed_work_sync(&ser->ser_alarm_work);
681         cancel_work_sync(&ser->ser_hdl_work);
682         clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
683         return 0;
684 }
685
/* Called when mac80211 reconfiguration after restart has finished; lets
 * the L2 reset state return to idle before its timeout fires.
 */
void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}
690
/* Entry point for hardware/firmware error reports: translate the MAC
 * error code @err into a SER event and queue it to the state machine.
 * L0 CMAC errors map to SER_EV_L0_RESET; L1 errors start the M1..M5
 * handshake; L2 errors (and L1-promoted-to-L2) trigger a full restart.
 *
 * Returns 0 when an event was queued, -EINVAL for unrecognized codes.
 */
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		/* Any code in the L2 error range promotes to a full reset. */
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);