// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);

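/*
 * Note: idxd_cmd_exec() is defined with the device control bits below;
 * it is declared ahead of time because the WQ control helpers use it.
 */
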
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

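/*
 * Each kernel WQ is backed by three parallel arrays of wq->size entries:
 * hardware descriptors (hw_descs) handed to the device, driver descriptors
 * (descs) wrapping them, and one DMA-coherent completion record per
 * descriptor (compls). Allocations are placed on the device's NUMA node.
 */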
static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
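/*
 * idxd_wq_alloc_resources() stitches the three arrays together: each
 * driver descriptor points at its hardware descriptor and completion
 * record, and a sbitmap_queue hands out free descriptor slots on submit.
 */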
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	wq->num_descs = wq->size;
	num_descs = wq->size;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		desc->completion = &wq->compls[i];
		desc->compl_dma = wq->compls_addr +
			sizeof(struct dsa_completion_record) * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

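/*
 * idxd_wq_enable()/idxd_wq_disable() issue device commands and mirror the
 * result in wq->state; an "already enabled" status from the hardware is
 * treated as success, so a repeated enable is harmless.
 */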
int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

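/*
 * The WQ-directed commands below encode their target in the operand as a
 * one-hot WQ mask in the low 16 bits plus the index of the 16-WQ block in
 * bits 16 and up, i.e. BIT(wq->id % 16) | ((wq->id / 16) << 16).
 */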
int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	wq->state = IDXD_WQ_DISABLED;
}

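/*
 * The WQ's limited portal (used for descriptor submission) lives in the
 * IDXD_WQ_BAR MMIO region; only this WQ's IDXD_PORTAL_SIZE window is mapped.
 */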
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->dportal)
		return -ENOMEM;
	dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->dportal);
}

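/*
 * idxd_wq_disable_cleanup() returns a WQ to its unconfigured state,
 * clearing the WQCFG shadow copy and every configurable attribute.
 * The caller must hold dev_lock.
 */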
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&idxd->dev_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;

	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

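/*
 * Interrupt-driven command submission: commands are serialized behind
 * IDXD_FLAG_CMD_RUNNING, the caller sleeps until the command-completion
 * interrupt fires, and the low status byte is cached in idxd->cmd_status.
 */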
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->dev_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	wait_for_completion(&done);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (status) {
		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
		idxd->cmd_status = *status & GENMASK(7, 0);
	}

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	unsigned long flags;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;

	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

/* Device configuration bits */
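/*
 * Each group's GRPCFG occupies a 64-byte stride starting at grpcfg_offset:
 * four 8-byte WQ bitmap words at offset 0, the engine bitmap at offset 32,
 * and the flags word at offset 40.
 */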
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < 4; i++) {
		grpcfg_offset = idxd->grpcfg_offset +
			group->id * 64 + i * sizeof(u64);
		iowrite64(group->grpcfg.wqs[i],
			  idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

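/*
 * WQCFG is programmed 32 bits at a time: WQCFG_STRIDES() gives the number
 * of 4-byte words in one WQ's config space and WQCFG_OFFSET() the register
 * address of each word.
 */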
static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
	 * wq reset. This will copy back the sticky values that are present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
	}

	/* byte 0-3 */
	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* byte 8-11 */
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	wq->wqcfg->mode = 1;
	wq->wqcfg->priority = wq->priority;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (!wq_dedicated(wq)) {
			dev_warn(dev, "No shared workqueue support.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}

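/*
 * idxd_device_config() programs the device in one pass: build the group
 * WQ and engine bitmaps in the shadow copies, then flush WQ and group
 * configuration to MMIO. The caller must hold dev_lock.
 */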
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}