2 * FUJITSU Extended Socket Network Device driver
3 * Copyright (c) 2015 FUJITSU LIMITED
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
24 #include "fjes_trace.h"
26 static void fjes_hw_update_zone_task(struct work_struct *);
27 static void fjes_hw_epstop_task(struct work_struct *);
29 /* supported MTU list */
/* MTU values (in bytes) the Extended Socket device supports, each wrapped
 * by FJES_MTU_DEFINE().  fjes_support_mtu[0] (8 KiB) is used as the initial
 * MTU when endpoint buffers are first set up in fjes_hw_setup().
 * NOTE(review): the array terminator/closing brace is not visible in this
 * chunk of the file.
 */
30 const u32 fjes_support_mtu[] = {
31 	FJES_MTU_DEFINE(8 * 1024),
32 	FJES_MTU_DEFINE(16 * 1024),
33 	FJES_MTU_DEFINE(32 * 1024),
34 	FJES_MTU_DEFINE(64 * 1024),
/* Read a 32-bit device register.
 * @hw:  device instance (provides the MMIO base)
 * @reg: byte offset of the register
 * Returns the value read via readl().
 */
38 u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
43 	value = readl(&base[reg]);
/* Reserve and map the device's MMIO region described by hw->hw_res.
 * Returns the mapped base address, or (per the visible error path) logs and
 * bails out when the region cannot be reserved.
 * NOTE(review): ioremap_nocache() is the legacy mapping API; newer kernels
 * use plain ioremap() — confirm against the target kernel version.
 */
48 static u8 *fjes_hw_iomap(struct fjes_hw *hw)
52 	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
54 		pr_err("request_mem_region failed\n");
58 	base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
/* Undo fjes_hw_iomap(): release the reserved MMIO region (the matching
 * iounmap of hw->base is not visible in this chunk).
 */
63 static void fjes_hw_iounmap(struct fjes_hw *hw)
66 	release_mem_region(hw->hw_res.start, hw->hw_res.size);
/* Trigger a device reset via the DCTL register and poll until the hardware
 * clears the reset bit or FJES_DEVICE_RESET_TIMEOUT (scaled to the poll
 * granularity) expires.
 * Returns 0 on success, -EIO when the reset bit never clears in time.
 */
69 int fjes_hw_reset(struct fjes_hw *hw)
76 	wr32(XSCT_DCTL, dctl.reg);
78 	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
79 	dctl.reg = rd32(XSCT_DCTL);
	/* Busy-poll: hardware clears dctl.bits.reset when the reset completes */
80 	while ((dctl.bits.reset == 1) && (timeout > 0)) {
82 		dctl.reg = rd32(XSCT_DCTL);
86 	return timeout > 0 ? 0 : -EIO;
/* Read the maximum endpoint ID count supported by the device from the
 * MAX_EP register.
 */
89 static int fjes_hw_get_max_epid(struct fjes_hw *hw)
91 	union REG_MAX_EP info;
93 	info.reg = rd32(XSCT_MAX_EP);
95 	return info.bits.maxep;
/* Read this partition's own endpoint ID from the OWNER_EPID register. */
98 static int fjes_hw_get_my_epid(struct fjes_hw *hw)
100 	union REG_OWNER_EPID info;
102 	info.reg = rd32(XSCT_OWNER_EPID);
104 	return info.bits.epid;
/* Allocate the shared-status region: the fixed header plus one status byte
 * per endpoint (hw->max_epid).  Stores the buffer in hw->hw_info.share and
 * records the endpoint count in share->epnum.
 * Returns 0 on success (error path for allocation failure is implied by the
 * NULL check but its return is not visible here).
 */
107 static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
111 	size = sizeof(struct fjes_device_shared_info) +
112 	       (sizeof(u8) * hw->max_epid);
113 	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
114 	if (!hw->hw_info.share)
117 	hw->hw_info.share->epnum = hw->max_epid;
/* Free the shared-status region and NULL the pointer to guard against
 * use-after-free / double-free on later cleanup paths.
 */
122 static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
124 	kfree(hw->hw_info.share)
125 	hw->hw_info.share = NULL;
/* Allocate one endpoint buffer (EP_BUFFER_SIZE, zeroed, from vmalloc space).
 * The buffer starts with a union ep_buffer_info control header; the packet
 * ring begins immediately after it (epbh->ring).
 */
128 static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
132 	mem = vzalloc(EP_BUFFER_SIZE);
137 	epbh->size = EP_BUFFER_SIZE;
139 	epbh->info = (union ep_buffer_info *)mem;
140 	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
/* Release an endpoint buffer allocated by fjes_hw_alloc_epbuf().
 * (Body not visible in this chunk — presumably vfree + pointer reset;
 * TODO confirm against the full file.)
 */
145 static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
/* (Re)initialize an endpoint buffer's control header for the given MAC
 * address and MTU.  The configured VLAN IDs are preserved across the reset:
 * they are saved before the memset and restored afterwards.
 * Caller is expected to hold rx_status_lock (callers visible in this file
 * take it around this function).
 */
155 void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
157 	union ep_buffer_info *info = epbh->info;
158 	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
	/* Save VLAN table so the wipe below does not lose it */
161 	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
162 		vlan_id[i] = info->v1i.vlan_id[i];
164 	memset(info, 0, sizeof(union ep_buffer_info));
166 	info->v1i.version = 0; /* version 0 */
168 	for (i = 0; i < ETH_ALEN; i++)
169 		info->v1i.mac_addr[i] = mac_addr[i];
174 	info->v1i.info_size = sizeof(union ep_buffer_info);
175 	info->v1i.buffer_size = epbh->size - info->v1i.info_size;
	/* Ring geometry follows from the frame size implied by the MTU */
177 	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
178 	info->v1i.count_max =
179 	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);
	/* Restore the saved VLAN table */
181 	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
182 		info->v1i.vlan_id[i] = vlan_id[i];
184 	info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
/* Program the device's command interface registers with the physical
 * addresses and lengths of the request/response buffers and the shared
 * status region.  64-bit addresses are split into low/high 32-bit halves
 * via GENMASK_ULL.
 */
188 fjes_hw_init_command_registers(struct fjes_hw *hw,
189 			       struct fjes_device_command_param *param)
191 	/* Request Buffer length */
192 	wr32(XSCT_REQBL, (__le32)(param->req_len));
193 	/* Response Buffer Length */
194 	wr32(XSCT_RESPBL, (__le32)(param->res_len));
196 	/* Request Buffer Address */
198 	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
200 	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));
202 	/* Response Buffer Address */
204 	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
206 	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));
208 	/* Share status address */
210 	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
212 	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
/* One-time hardware data-path setup: allocate the per-endpoint shared
 * memory array, the command request/response buffers, the shared status
 * region, and a tx/rx endpoint-buffer pair for every remote endpoint, then
 * program the command registers with their physical addresses.
 * Endpoint buffers start with an all-zero MAC and the smallest supported
 * MTU (fjes_support_mtu[0]); the real values are set later when peers pair.
 * Returns 0 on success; the visible unwind path frees everything allocated
 * so far in reverse order.
 * FIX(review): "&param" had been mojibake-corrupted to "&para;m" (HTML
 * entity) at the memset and fjes_hw_init_command_registers call sites;
 * restored to the correct address-of expression.
 */
215 static int fjes_hw_setup(struct fjes_hw *hw)
217 	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
218 	struct fjes_device_command_param param;
219 	struct ep_share_mem_info *buf_pair;
226 	hw->hw_info.max_epid = &hw->max_epid;
227 	hw->hw_info.my_epid = &hw->my_epid;
229 	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
234 	hw->ep_shm_info = (struct ep_share_mem_info *)buf;
236 	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
237 	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
238 	if (!(hw->hw_info.req_buf)) {
243 	hw->hw_info.req_buf_size = mem_size;
245 	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
246 	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
247 	if (!(hw->hw_info.res_buf)) {
252 	hw->hw_info.res_buf_size = mem_size;
254 	result = fjes_hw_alloc_shared_status_region(hw);
258 	hw->hw_info.buffer_share_bit = 0;
259 	hw->hw_info.buffer_unshare_reserve_bit = 0;
	/* One tx/rx endpoint-buffer pair per remote endpoint (skip self) */
261 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
262 		if (epidx != hw->my_epid) {
263 			buf_pair = &hw->ep_shm_info[epidx];
265 			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
269 			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
273 			spin_lock_irqsave(&hw->rx_status_lock, flags);
274 			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
275 					    fjes_support_mtu[0]);
276 			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
277 					    fjes_support_mtu[0]);
278 			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
282 	memset(&param, 0, sizeof(param));
	/* Hand the device the physical addresses of the command buffers */
284 	param.req_len = hw->hw_info.req_buf_size;
285 	param.req_start = __pa(hw->hw_info.req_buf);
286 	param.res_len = hw->hw_info.res_buf_size;
287 	param.res_start = __pa(hw->hw_info.res_buf);
289 	param.share_start = __pa(hw->hw_info.share->ep_status);
291 	fjes_hw_init_command_registers(hw, &param);
	/* Error unwind: free in reverse order of allocation */
296 	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
297 		if (epidx == hw->my_epid)
299 		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
300 		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
302 	fjes_hw_free_shared_status_region(hw);
304 	kfree(hw->hw_info.res_buf);
305 	hw->hw_info.res_buf = NULL;
307 	kfree(hw->hw_info.req_buf);
308 	hw->hw_info.req_buf = NULL;
310 	kfree(hw->ep_shm_info);
311 	hw->ep_shm_info = NULL;
/* Tear down everything fjes_hw_setup() built.  Safe to call when setup never
 * ran (early return on NULL ep_shm_info).  Pointers are NULLed after free so
 * a repeated cleanup is harmless.
 */
315 static void fjes_hw_cleanup(struct fjes_hw *hw)
319 	if (!hw->ep_shm_info)
322 	fjes_hw_free_shared_status_region(hw);
324 	kfree(hw->hw_info.req_buf);
325 	hw->hw_info.req_buf = NULL;
327 	kfree(hw->hw_info.res_buf);
328 	hw->hw_info.res_buf = NULL;
	/* Endpoint buffers exist for every remote endpoint, not for self */
330 	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
331 		if (epidx == hw->my_epid)
333 		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
334 		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
337 	kfree(hw->ep_shm_info);
338 	hw->ep_shm_info = NULL;
/* Top-level hardware bring-up: map MMIO, reset the device, mask all
 * interrupts, initialize the deferred-work items and locks, discover the
 * endpoint topology (max/my EPID), run the data-path setup, and allocate
 * the debug trace buffer.
 * Returns 0 on success or a negative errno from a failed step (the error
 * returns themselves are not visible in this chunk).
 */
341 int fjes_hw_init(struct fjes_hw *hw)
345 	hw->base = fjes_hw_iomap(hw);
349 	ret = fjes_hw_reset(hw);
	/* Start with every interrupt source masked; unmasked later as needed */
353 	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
355 	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
356 	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
358 	mutex_init(&hw->hw_info.lock);
359 	spin_lock_init(&hw->rx_status_lock);
361 	hw->max_epid = fjes_hw_get_max_epid(hw);
362 	hw->my_epid = fjes_hw_get_my_epid(hw);
	/* Sanity-check the topology reported by the hardware */
364 	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
367 	ret = fjes_hw_setup(hw);
369 	hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
370 	hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;
/* Shutdown path for fjes_hw_init(): stop debug tracing if it was enabled,
 * free the trace buffer, reset the device (logging on failure), and cancel
 * the deferred-work items so they cannot run after teardown.
 */
375 void fjes_hw_exit(struct fjes_hw *hw)
381 	if (hw->debug_mode) {
382 		/* disable debug mode */
383 		mutex_lock(&hw->hw_info.lock);
384 		fjes_hw_stop_debug(hw);
385 		mutex_unlock(&hw->hw_info.lock);
387 	vfree(hw->hw_info.trace);
388 	hw->hw_info.trace = NULL;
389 	hw->hw_info.trace_size = 0;
392 	ret = fjes_hw_reset(hw);
394 		pr_err("%s: reset error", __func__);
	/* Make sure no deferred work is left running against freed state */
402 	cancel_work_sync(&hw->update_zone_task);
403 	cancel_work_sync(&hw->epstop_task);
/* Issue one command of @type through the CR register and poll the CS
 * register for completion (up to FJES_COMMAND_REQ_TIMEOUT, scaled).
 * If the device flags an immediate error in CR, the error info is mapped
 * onto the FJES_CMD_STATUS_* result codes instead of polling.
 * Returns one of enum fjes_dev_command_response_e; also emits a tracepoint
 * with the raw CR/CS values and remaining timeout.
 */
406 static enum fjes_dev_command_response_e
407 fjes_hw_issue_request_command(struct fjes_hw *hw,
408 			      enum fjes_dev_command_request_type type)
410 	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
413 	int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
416 	cr.bits.req_start = 1;
417 	cr.bits.req_code = type;
418 	wr32(XSCT_CR, cr.reg);
419 	cr.reg = rd32(XSCT_CR);
421 	if (cr.bits.error == 0) {
		/* Command accepted: poll CS until complete or timeout */
422 		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
423 		cs.reg = rd32(XSCT_CS);
425 		while ((cs.bits.complete != 1) && timeout > 0) {
427 			cs.reg = rd32(XSCT_CS);
431 		if (cs.bits.complete == 1)
432 			ret = FJES_CMD_STATUS_NORMAL;
433 		else if (timeout <= 0)
434 			ret = FJES_CMD_STATUS_TIMEOUT;
		/* Immediate error reported in CR: translate err_info */
437 		switch (cr.bits.err_info) {
438 		case FJES_CMD_REQ_ERR_INFO_PARAM:
439 			ret = FJES_CMD_STATUS_ERROR_PARAM;
441 		case FJES_CMD_REQ_ERR_INFO_STATUS:
442 			ret = FJES_CMD_STATUS_ERROR_STATUS;
445 			ret = FJES_CMD_STATUS_UNKNOWN;
450 	trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);
/* Execute the INFO command: zero the request/response buffers, fill in the
 * request length, issue FJES_CMD_REQ_INFO, then validate the response
 * length against the expected size for this endpoint count and map the
 * response/status codes to an errno-style result (returns not visible in
 * this chunk).  Caller is expected to hold hw_info.lock (callers in this
 * file take it).
 */
455 int fjes_hw_request_info(struct fjes_hw *hw)
457 	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
458 	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
459 	enum fjes_dev_command_response_e ret;
462 	memset(req_buf, 0, hw->hw_info.req_buf_size);
463 	memset(res_buf, 0, hw->hw_info.res_buf_size);
465 	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
467 	res_buf->info.length = 0;
468 	res_buf->info.code = 0;
470 	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
471 	trace_fjes_hw_request_info(hw, res_buf);
	/* Response length must match the per-max_epid expected size */
475 	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
476 	    res_buf->info.length) {
477 		trace_fjes_hw_request_info_err("Invalid res_buf");
479 	} else if (ret == FJES_CMD_STATUS_NORMAL) {
480 		switch (res_buf->info.code) {
481 		case FJES_CMD_REQ_RES_CODE_NORMAL:
490 		case FJES_CMD_STATUS_UNKNOWN:
493 		case FJES_CMD_STATUS_TIMEOUT:
494 			trace_fjes_hw_request_info_err("Timeout");
497 		case FJES_CMD_STATUS_ERROR_PARAM:
500 		case FJES_CMD_STATUS_ERROR_STATUS:
/* Share this node's tx/rx endpoint buffers with @dest_epid via the
 * SHARE_BUFFER command.  The request carries each buffer's size followed by
 * the physical address of every EP_BUFFER_INFO_SIZE-sized page (vmalloc
 * memory is not physically contiguous, hence the per-page vmalloc_to_page
 * translation).  If the peer reports BUSY the command is retried with a
 * staggered backoff (200ms + 20ms * my_epid, to de-synchronize peers) until
 * FJES_COMMAND_REQ_BUFF_TIMEOUT expires.  On success the epid is recorded
 * in buffer_share_bit; already-shared epids return early.
 * Caller is expected to hold hw_info.lock (callers in this file take it).
 */
512 int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
513 			       struct ep_share_mem_info *buf_pair)
515 	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
516 	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
517 	enum fjes_dev_command_response_e ret;
	/* Nothing to do if this endpoint's buffers are already shared */
524 	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
527 	memset(req_buf, 0, hw->hw_info.req_buf_size);
528 	memset(res_buf, 0, hw->hw_info.res_buf_size);
530 	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
533 	req_buf->share_buffer.epid = dest_epid;
	/* tx buffer: size, then one physical address per page */
536 	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
537 	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
538 	for (i = 0; i < page_count; i++) {
539 		addr = ((u8 *)(buf_pair->tx.buffer)) +
540 		       (i * EP_BUFFER_INFO_SIZE);
541 		req_buf->share_buffer.buffer[idx++] =
542 		    (__le64)(page_to_phys(vmalloc_to_page(addr)) +
543 			     offset_in_page(addr));
	/* rx buffer: same layout as tx above */
546 	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
547 	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
548 	for (i = 0; i < page_count; i++) {
549 		addr = ((u8 *)(buf_pair->rx.buffer)) +
550 		       (i * EP_BUFFER_INFO_SIZE);
551 		req_buf->share_buffer.buffer[idx++] =
552 		    (__le64)(page_to_phys(vmalloc_to_page(addr)) +
553 			     offset_in_page(addr));
556 	res_buf->share_buffer.length = 0;
557 	res_buf->share_buffer.code = 0;
559 	trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);
561 	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
	/* Retry while the peer reports BUSY, staggered per-epid backoff */
563 	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
564 	while ((ret == FJES_CMD_STATUS_NORMAL) &&
565 	       (res_buf->share_buffer.length ==
566 		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
567 	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
569 		msleep(200 + hw->my_epid * 20);
570 		timeout -= (200 + hw->my_epid * 20);
572 		res_buf->share_buffer.length = 0;
573 		res_buf->share_buffer.code = 0;
575 		ret = fjes_hw_issue_request_command(
576 		    hw, FJES_CMD_REQ_SHARE_BUFFER);
581 	trace_fjes_hw_register_buff_addr(res_buf, timeout);
583 	if (res_buf->share_buffer.length !=
584 	    FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
585 		trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
587 	} else if (ret == FJES_CMD_STATUS_NORMAL) {
588 		switch (res_buf->share_buffer.code) {
589 		case FJES_CMD_REQ_RES_CODE_NORMAL:
591 			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
593 		case FJES_CMD_REQ_RES_CODE_BUSY:
594 			trace_fjes_hw_register_buff_addr_err("Busy Timeout");
603 		case FJES_CMD_STATUS_UNKNOWN:
606 		case FJES_CMD_STATUS_TIMEOUT:
607 			trace_fjes_hw_register_buff_addr_err("Timeout");
610 		case FJES_CMD_STATUS_ERROR_PARAM:
611 		case FJES_CMD_STATUS_ERROR_STATUS:
/* Revoke the buffer sharing with @dest_epid via the UNSHARE_BUFFER command.
 * Mirrors fjes_hw_register_buff_addr(): early-out when not shared, BUSY
 * retry loop with the same staggered backoff, and on success the epid's bit
 * is cleared from buffer_share_bit.
 * Caller is expected to hold hw_info.lock (callers in this file take it).
 */
621 int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
623 	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
624 	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
625 	struct fjes_device_shared_info *share = hw->hw_info.share;
626 	enum fjes_dev_command_response_e ret;
	/* Guard: command buffers / shared region must exist */
633 	if (!req_buf || !res_buf || !share)
	/* Nothing to do if the buffers were never shared with this epid */
636 	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
639 	memset(req_buf, 0, hw->hw_info.req_buf_size);
640 	memset(res_buf, 0, hw->hw_info.res_buf_size);
642 	req_buf->unshare_buffer.length =
643 	    FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
644 	req_buf->unshare_buffer.epid = dest_epid;
646 	res_buf->unshare_buffer.length = 0;
647 	res_buf->unshare_buffer.code = 0;
649 	trace_fjes_hw_unregister_buff_addr_req(req_buf);
650 	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
	/* Retry while the peer reports BUSY, staggered per-epid backoff */
652 	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
653 	while ((ret == FJES_CMD_STATUS_NORMAL) &&
654 	       (res_buf->unshare_buffer.length ==
655 		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
656 	       (res_buf->unshare_buffer.code ==
657 		FJES_CMD_REQ_RES_CODE_BUSY) &&
659 		msleep(200 + hw->my_epid * 20);
660 		timeout -= (200 + hw->my_epid * 20);
662 		res_buf->unshare_buffer.length = 0;
663 		res_buf->unshare_buffer.code = 0;
666 		    fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
671 	trace_fjes_hw_unregister_buff_addr(res_buf, timeout);
673 	if (res_buf->unshare_buffer.length !=
674 	    FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
675 		trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
677 	} else if (ret == FJES_CMD_STATUS_NORMAL) {
678 		switch (res_buf->unshare_buffer.code) {
679 		case FJES_CMD_REQ_RES_CODE_NORMAL:
681 			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
683 		case FJES_CMD_REQ_RES_CODE_BUSY:
684 			trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
693 		case FJES_CMD_STATUS_UNKNOWN:
696 		case FJES_CMD_STATUS_TIMEOUT:
697 			trace_fjes_hw_unregister_buff_addr_err("Timeout");
700 		case FJES_CMD_STATUS_ERROR_PARAM:
701 		case FJES_CMD_STATUS_ERROR_STATUS:
/* Raise an interrupt toward @dest_epid by writing the interrupt-generation
 * register; @mask selects the interrupt cause and is OR-ed with the target
 * endpoint ID.
 */
711 int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
712 			    enum REG_ICTL_MASK mask)
714 	u32 ig = mask | dest_epid;
716 	wr32(XSCT_IG, cpu_to_le32(ig));
/* Snapshot the interrupt status register (IS) and return it (conversion /
 * clearing of the latched status is not visible in this chunk).
 */
721 u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
725 	cur_is = rd32(XSCT_IS);
/* Mask (@mask == true, via IMS) or unmask (via IMC) the interrupt sources
 * selected by @intr_mask.
 */
730 void fjes_hw_set_irqmask(struct fjes_hw *hw,
731 			 enum REG_ICTL_MASK intr_mask, bool mask)
734 		wr32(XSCT_IMS, intr_mask);
736 		wr32(XSCT_IMC, intr_mask);
/* True when @epid is a valid, zoning-enabled endpoint residing in the same
 * zone as this node.  Out-of-range epids, disabled endpoints, and a local
 * zone of TYPE_NONE all fail the check.
 */
739 bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
741 	if (epid >= hw->max_epid)
744 	if ((hw->ep_shm_info[epid].es_status !=
745 	     FJES_ZONING_STATUS_ENABLE) ||
746 	    (hw->ep_shm_info[hw->my_epid].zone ==
747 	     FJES_ZONING_ZONE_TYPE_NONE))
750 	return (hw->ep_shm_info[epid].zone ==
751 		hw->ep_shm_info[hw->my_epid].zone);
/* Return the shared-status byte for @dest_epid from the device-shared
 * status region (bounds-checked against share->epnum; the out-of-range
 * default value is not visible in this chunk).
 */
754 int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
759 	if (dest_epid < share->epnum)
760 		value = share->ep_status[dest_epid];
/* True when a TX/RX stop has been requested toward @src_epid (bit set in
 * hw->txrx_stop_req_bit).
 */
765 static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
767 	return test_bit(src_epid, &hw->txrx_stop_req_bit);
/* True when the peer has acknowledged the stop request, i.e. the
 * FJES_RX_STOP_REQ_DONE flag is set in our tx buffer's rx_status for
 * @src_epid.
 */
770 static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
772 	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
773 		FJES_RX_STOP_REQ_DONE);
/* Classify the relationship with endpoint @epid:
 *   UNSHARE  - buffers not shared at all
 *   WAITING  - stop requested, peer has not completed it yet
 *   COMPLETE - stop requested and the peer finished processing it
 *   SHARED   - buffers shared, no stop in progress
 */
776 enum ep_partner_status
777 fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
779 	enum ep_partner_status status;
781 	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
782 		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
783 			status = EP_PARTNER_WAITING;
785 			if (fjes_hw_epid_is_stop_process_done(hw, epid))
786 				status = EP_PARTNER_COMPLETE;
788 			status = EP_PARTNER_SHARED;
791 		status = EP_PARTNER_UNSHARE;
/* Request a TX/RX stop toward every remote endpoint: for currently SHARED
 * partners an explicit stop-request interrupt is raised (and counted in
 * ep_stats); for all remote endpoints the unshare-reserve and stop-request
 * bits are set and FJES_RX_STOP_REQ_REQUEST is published in the tx buffer
 * status under rx_status_lock.
 */
797 void fjes_hw_raise_epstop(struct fjes_hw *hw)
799 	enum ep_partner_status status;
803 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
804 		if (epidx == hw->my_epid)
807 		status = fjes_hw_get_partner_ep_status(hw, epidx);
809 		case EP_PARTNER_SHARED:
			/* Actively paired peers get an explicit interrupt */
810 			fjes_hw_raise_interrupt(hw, epidx,
811 						REG_ICTL_MASK_TXRX_STOP_REQ);
812 			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
818 		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
819 		set_bit(epidx, &hw->txrx_stop_req_bit);
821 		spin_lock_irqsave(&hw->rx_status_lock, flags);
822 		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
823 			FJES_RX_STOP_REQ_REQUEST;
824 		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
/* Wait for every endpoint flagged in buffer_unshare_reserve_bit to finish
 * its epstop handshake (peer sets FJES_RX_STOP_REQ_DONE in our rx buffer,
 * or the endpoint is no longer shared), bounded by
 * FJES_COMMAND_EPSTOP_WAIT_TIMEOUT.  Any bits still set after the timeout
 * are force-cleared so teardown can proceed.
 * Returns success iff the wait completed before the timeout (exact return
 * values not visible in this chunk).
 */
828 int fjes_hw_wait_epstop(struct fjes_hw *hw)
830 	enum ep_partner_status status;
831 	union ep_buffer_info *info;
835 	while (hw->hw_info.buffer_unshare_reserve_bit &&
836 	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
837 		for (epidx = 0; epidx < hw->max_epid; epidx++) {
838 			if (epidx == hw->my_epid)
840 			status = fjes_hw_epid_is_shared(hw->hw_info.share,
842 			info = hw->ep_shm_info[epidx].rx.info;
844 			     (info->v1i.rx_status &
845 			      FJES_RX_STOP_REQ_DONE)) &&
847 				      &hw->hw_info.buffer_unshare_reserve_bit)) {
849 					  &hw->hw_info.buffer_unshare_reserve_bit);
	/* Timed out or done: force-clear any endpoints that never answered */
857 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
858 		if (epidx == hw->my_epid)
860 		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
862 				  &hw->hw_info.buffer_unshare_reserve_bit);
865 	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
/* True when the endpoint buffer's layout version matches @version. */
869 bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
871 	union ep_buffer_info *info = epbh->info;
873 	return (info->common.version == version);
/* True when the buffer's frame size corresponds to @mtu AND the MTU-change
 * handshake has completed (FJES_RX_MTU_CHANGING_DONE set).
 */
876 bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
878 	union ep_buffer_info *info = epbh->info;
880 	return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
881 		info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
/* Linear scan of the buffer's VLAN table for @vlan_id (result handling for
 * the found/not-found cases is not fully visible in this chunk).
 */
884 bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
886 	union ep_buffer_info *info = epbh->info;
893 	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
894 		if (vlan_id == info->v1i.vlan_id[i]) {
/* Record @vlan_id in the first free (zero) slot of the buffer's VLAN table;
 * the boolean result reports whether a slot was available (return statements
 * not visible in this chunk).
 */
903 bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
905 	union ep_buffer_info *info = epbh->info;
908 	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
909 		if (info->v1i.vlan_id[i] == 0) {
910 			info->v1i.vlan_id[i] = vlan_id;
/* Remove @vlan_id from the buffer's VLAN table by zeroing every matching
 * slot (0 means "free" per fjes_hw_set_vlan_id()).
 */
917 void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
919 	union ep_buffer_info *info = epbh->info;
923 	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
924 		if (vlan_id == info->v1i.vlan_id[i])
925 			info->v1i.vlan_id[i] = 0;
/* True when the RX ring has nothing to consume.  Also treats the ring as
 * empty while an MTU change is still in flight (DONE flag clear) and when
 * count_max is 0 (guards EP_RING_EMPTY's modulo against divide-by-zero).
 */
930 bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
932 	union ep_buffer_info *info = epbh->info;
934 	if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
937 	if (info->v1i.count_max == 0)
940 	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
941 			     info->v1i.count_max);
/* Return the payload address of the RX ring's current (head) frame and
 * store its size in *psize.  The frame slot is located by ring index * the
 * per-frame stride (frame_max).  Caller must have checked the ring is not
 * empty first.
 */
944 void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
947 	union ep_buffer_info *info = epbh->info;
948 	struct esmem_frame *ring_frame;
951 	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
953 							info->v1i.count_max) *
954 						       info->v1i.frame_max]);
956 	*psize = (size_t)ring_frame->frame_size;
958 	frame = ring_frame->frame_data;
/* Consume (discard) the current RX frame by advancing the ring head; no-op
 * when the ring is empty.
 */
963 void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
965 	union ep_buffer_info *info = epbh->info;
967 	if (fjes_hw_epbuf_rx_is_empty(epbh))
970 	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
/* Enqueue @frame (@size bytes) into the TX ring: fail fast when the ring is
 * full, copy size + payload into the tail slot, then advance the tail
 * index.  Error/success return values are not visible in this chunk.
 */
973 int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
974 			      void *frame, size_t size)
976 	union ep_buffer_info *info = epbh->info;
977 	struct esmem_frame *ring_frame;
979 	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
982 	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
984 							info->v1i.count_max) *
985 						       info->v1i.frame_max]);
987 	ring_frame->frame_size = size;
988 	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);
	/* Publish the frame by advancing the tail */
990 	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);
/* Deferred work (hw->update_zone_task): refresh zoning information from the
 * device and reconcile per-endpoint sharing state.
 *
 * Phase 1 (under hw_info.lock): run the INFO command; on failure schedule a
 * forced adapter reset/close.  Then, comparing the fresh zone/status info
 * against each endpoint's current partner status, collect three bitmaps:
 *   share_bit   - UNSHARE partners that are now in our enabled zone
 *   unshare_bit - UNSHARE partners that are not
 *   irq_bit     - SHARED partners that left our zone (need a stop request)
 * COMPLETE/WAITING partners leaving the zone are queued on the adapter's
 * unshare watch bitmask instead.
 *
 * Phase 2 (lock dropped, re-taken per command): for share_bit endpoints,
 * re-init the tx epbuf with the netdev MAC/MTU and register the buffer
 * pair; for unshare_bit endpoints, unregister and re-init the tx epbuf;
 * for irq_bit endpoints, raise a TXRX stop-request interrupt and publish
 * FJES_RX_STOP_REQ_REQUEST.  Command failures schedule the force-close
 * task; any pending unshare work kicks unshare_watch_task on control_wq.
 */
995 static void fjes_hw_update_zone_task(struct work_struct *work)
997 	struct fjes_hw *hw = container_of(work,
998 					  struct fjes_hw, update_zone_task);
1000 	struct my_s {u8 es_status; u8 zone; } *info;
1001 	union fjes_device_command_res *res_buf;
1002 	enum ep_partner_status pstatus;
1004 	struct fjes_adapter *adapter;
1005 	struct net_device *netdev;
1006 	unsigned long flags;
1008 	ulong unshare_bit = 0;
1009 	ulong share_bit = 0;
1015 	adapter = (struct fjes_adapter *)hw->back;
1016 	netdev = adapter->netdev;
1017 	res_buf = hw->hw_info.res_buf;
	/* The INFO response payload is an array of {es_status, zone} pairs */
1018 	info = (struct my_s *)&res_buf->info.info;
1020 	mutex_lock(&hw->hw_info.lock);
1022 	ret = fjes_hw_request_info(hw);
1027 		if (!work_pending(&adapter->force_close_task)) {
1028 			adapter->force_reset = true;
1029 			schedule_work(&adapter->force_close_task);
1035 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
1036 		if (epidx == hw->my_epid) {
1037 			hw->ep_shm_info[epidx].es_status =
1038 			    info[epidx].es_status;
1039 			hw->ep_shm_info[epidx].zone =
1044 		pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
1046 		case EP_PARTNER_UNSHARE:
			/* Newly in-zone and enabled -> share; otherwise unshare */
1048 			if ((info[epidx].zone !=
1049 			     FJES_ZONING_ZONE_TYPE_NONE) &&
1050 			    (info[epidx].es_status ==
1051 			     FJES_ZONING_STATUS_ENABLE) &&
1052 			    (info[epidx].zone ==
1053 			     info[hw->my_epid].zone))
1054 				set_bit(epidx, &share_bit);
1056 				set_bit(epidx, &unshare_bit);
1059 		case EP_PARTNER_COMPLETE:
1060 		case EP_PARTNER_WAITING:
1061 			if ((info[epidx].zone ==
1062 			     FJES_ZONING_ZONE_TYPE_NONE) ||
1063 			    (info[epidx].es_status !=
1064 			     FJES_ZONING_STATUS_ENABLE) ||
1065 			    (info[epidx].zone !=
1066 			     info[hw->my_epid].zone)) {
1068 					&adapter->unshare_watch_bitmask);
1070 					&hw->hw_info.buffer_unshare_reserve_bit);
1074 		case EP_PARTNER_SHARED:
1075 			if ((info[epidx].zone ==
1076 			     FJES_ZONING_ZONE_TYPE_NONE) ||
1077 			    (info[epidx].es_status !=
1078 			     FJES_ZONING_STATUS_ENABLE) ||
1079 			    (info[epidx].zone !=
1080 			     info[hw->my_epid].zone))
1081 				set_bit(epidx, &irq_bit);
1085 		hw->ep_shm_info[epidx].es_status =
1086 		    info[epidx].es_status;
1087 		hw->ep_shm_info[epidx].zone = info[epidx].zone;
1092 	mutex_unlock(&hw->hw_info.lock);
	/* Phase 2: act on the collected bitmaps, one endpoint at a time */
1094 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
1095 		if (epidx == hw->my_epid)
1098 		if (test_bit(epidx, &share_bit)) {
1099 			spin_lock_irqsave(&hw->rx_status_lock, flags);
1100 			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1101 					    netdev->dev_addr, netdev->mtu);
1102 			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1104 			mutex_lock(&hw->hw_info.lock);
1106 			ret = fjes_hw_register_buff_addr(
1107 			    hw, epidx, &hw->ep_shm_info[epidx]);
1115 				if (!work_pending(&adapter->force_close_task)) {
1116 					adapter->force_reset = true;
1118 						      &adapter->force_close_task);
1122 			mutex_unlock(&hw->hw_info.lock);
1124 			hw->ep_shm_info[epidx].ep_stats
1125 				.com_regist_buf_exec += 1;
1128 		if (test_bit(epidx, &unshare_bit)) {
1129 			mutex_lock(&hw->hw_info.lock);
1131 			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1139 				if (!work_pending(&adapter->force_close_task)) {
1140 					adapter->force_reset = true;
1142 						      &adapter->force_close_task);
1147 			mutex_unlock(&hw->hw_info.lock);
1149 			hw->ep_shm_info[epidx].ep_stats
1150 				.com_unregist_buf_exec += 1;
1153 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1154 				fjes_hw_setup_epbuf(
1155 					&hw->ep_shm_info[epidx].tx,
1156 					netdev->dev_addr, netdev->mtu);
1157 				spin_unlock_irqrestore(&hw->rx_status_lock,
1162 		if (test_bit(epidx, &irq_bit)) {
1163 			fjes_hw_raise_interrupt(hw, epidx,
1164 						REG_ICTL_MASK_TXRX_STOP_REQ);
1166 			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
1168 			set_bit(epidx, &hw->txrx_stop_req_bit);
1169 			spin_lock_irqsave(&hw->rx_status_lock, flags);
1170 			hw->ep_shm_info[epidx].tx.
1171 				info->v1i.rx_status |=
1172 					FJES_RX_STOP_REQ_REQUEST;
1173 			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1174 			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1178 	if (irq_bit || adapter->unshare_watch_bitmask) {
1179 		if (!work_pending(&adapter->unshare_watch_task))
1180 			queue_work(adapter->control_wq,
1181 				   &adapter->unshare_watch_task);
/* Deferred work (hw->epstop_task): acknowledge pending endpoint-stop
 * requests.  For every bit set in hw->epstop_req_bit, publish
 * FJES_RX_STOP_REQ_DONE in that endpoint's tx buffer status (under
 * rx_status_lock), clear the request bit, mark the endpoint in the
 * adapter's unshare watch bitmask, and kick unshare_watch_task.  The outer
 * loop re-reads epstop_req_bit to catch requests that arrive while
 * processing.
 */
1187 static void fjes_hw_epstop_task(struct work_struct *work)
1188 	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
1189 	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
1190 	unsigned long flags;
1194 	while ((remain_bit = hw->epstop_req_bit)) {
1195 		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
1196 			if (remain_bit & 1) {
1197 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1198 				hw->ep_shm_info[epid_bit].
1199 					tx.info->v1i.rx_status |=
1200 						FJES_RX_STOP_REQ_DONE;
1201 				spin_unlock_irqrestore(&hw->rx_status_lock,
1204 				clear_bit(epid_bit, &hw->epstop_req_bit);
1206 					&adapter->unshare_watch_bitmask);
1208 				if (!work_pending(&adapter->unshare_watch_task))
1210 						adapter->control_wq,
1211 						&adapter->unshare_watch_task);
/* Start device debug tracing: zero the trace buffer, build a START_DEBUG
 * request carrying the debug mode, buffer length, and the physical address
 * of each FJES_DEBUG_PAGE_SIZE page of the (vmalloc'd, hence per-page
 * translated) trace buffer, issue the command, and validate the response.
 * Requires the trace buffer to exist (allocated in fjes_hw_init()); caller
 * is expected to hold hw_info.lock.
 */
1217 int fjes_hw_start_debug(struct fjes_hw *hw)
1219 	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
1220 	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
1221 	enum fjes_dev_command_response_e ret;
1227 	if (!hw->hw_info.trace)
1229 	memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);
1231 	memset(req_buf, 0, hw->hw_info.req_buf_size);
1232 	memset(res_buf, 0, hw->hw_info.res_buf_size);
1234 	req_buf->start_trace.length =
1235 		FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
1236 	req_buf->start_trace.mode = hw->debug_mode;
1237 	req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
	/* vmalloc memory is not physically contiguous: translate per page */
1238 	page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
1239 	for (i = 0; i < page_count; i++) {
1240 		addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
1241 		req_buf->start_trace.buffer[i] =
1242 			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
1243 			offset_in_page(addr));
1246 	res_buf->start_trace.length = 0;
1247 	res_buf->start_trace.code = 0;
1249 	trace_fjes_hw_start_debug_req(req_buf);
1250 	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
1251 	trace_fjes_hw_start_debug(res_buf);
1253 	if (res_buf->start_trace.length !=
1254 	    FJES_DEV_COMMAND_START_DBG_RES_LEN) {
1256 		trace_fjes_hw_start_debug_err("Invalid res_buf");
1257 	} else if (ret == FJES_CMD_STATUS_NORMAL) {
1258 		switch (res_buf->start_trace.code) {
1259 		case FJES_CMD_REQ_RES_CODE_NORMAL:
1268 		case FJES_CMD_STATUS_UNKNOWN:
1271 		case FJES_CMD_STATUS_TIMEOUT:
1272 			trace_fjes_hw_start_debug_err("Busy Timeout");
1275 		case FJES_CMD_STATUS_ERROR_PARAM:
1276 		case FJES_CMD_STATUS_ERROR_STATUS:
/* Stop device debug tracing via the STOP_DEBUG command and validate the
 * response length/code, mirroring fjes_hw_start_debug().  Requires the
 * trace buffer to exist; caller is expected to hold hw_info.lock (see
 * fjes_hw_exit()).
 */
1286 int fjes_hw_stop_debug(struct fjes_hw *hw)
1288 	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
1289 	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
1290 	enum fjes_dev_command_response_e ret;
1293 	if (!hw->hw_info.trace)
1296 	memset(req_buf, 0, hw->hw_info.req_buf_size);
1297 	memset(res_buf, 0, hw->hw_info.res_buf_size);
1298 	req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;
1300 	res_buf->stop_trace.length = 0;
1301 	res_buf->stop_trace.code = 0;
1303 	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
1304 	trace_fjes_hw_stop_debug(res_buf);
1306 	if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
1307 		trace_fjes_hw_stop_debug_err("Invalid res_buf");
1309 	} else if (ret == FJES_CMD_STATUS_NORMAL) {
1310 		switch (res_buf->stop_trace.code) {
1311 		case FJES_CMD_REQ_RES_CODE_NORMAL:
1321 		case FJES_CMD_STATUS_UNKNOWN:
1324 		case FJES_CMD_STATUS_TIMEOUT:
1326 			trace_fjes_hw_stop_debug_err("Busy Timeout");
1328 		case FJES_CMD_STATUS_ERROR_PARAM:
1329 		case FJES_CMD_STATUS_ERROR_STATUS: