1 // SPDX-License-Identifier: GPL-2.0-or-later
/* backing_ops.c - query/set operations on saved SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * These register operations allow SPUFS to operate on saved
 * SPU contexts rather than hardware.
 */
11 #include <linux/errno.h>
12 #include <linux/sched.h>
13 #include <linux/kernel.h>
15 #include <linux/vmalloc.h>
16 #include <linux/smp.h>
17 #include <linux/stddef.h>
18 #include <linux/unistd.h>
19 #include <linux/poll.h>
23 #include <asm/spu_csa.h>
24 #include <asm/spu_info.h>
25 #include <asm/mmu_context.h>
/*
 * Reads/writes to various problem and priv2 registers require
 * state changes, i.e. generate SPU events, modify channel
 * interrupt counts.
 */
34 static void gen_spu_event(struct spu_context *ctx, u32 event)
40 ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
41 ch0_data = ctx->csa.spu_chnldata_RW[0];
42 ch1_data = ctx->csa.spu_chnldata_RW[1];
43 ctx->csa.spu_chnldata_RW[0] |= event;
44 if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
45 ctx->csa.spu_chnlcnt_RW[0] = 1;
49 static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
54 spin_lock(&ctx->csa.register_lock);
55 mbox_stat = ctx->csa.prob.mb_stat_R;
56 if (mbox_stat & 0x0000ff) {
57 /* Read the first available word.
58 * Implementation note: the depth
59 * of pu_mb_R is currently 1.
61 *data = ctx->csa.prob.pu_mb_R;
62 ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
63 ctx->csa.spu_chnlcnt_RW[28] = 1;
64 gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
67 spin_unlock(&ctx->csa.register_lock);
71 static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
73 return ctx->csa.prob.mb_stat_R;
76 static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx,
83 spin_lock_irq(&ctx->csa.register_lock);
84 stat = ctx->csa.prob.mb_stat_R;
86 /* if the requested event is there, return the poll
87 mask, otherwise enable the interrupt to get notified,
88 but first mark any pending interrupts as done so
89 we don't get woken up unnecessarily */
91 if (events & (EPOLLIN | EPOLLRDNORM)) {
93 ret |= EPOLLIN | EPOLLRDNORM;
95 ctx->csa.priv1.int_stat_class2_RW &=
97 ctx->csa.priv1.int_mask_class2_RW |=
98 CLASS2_ENABLE_MAILBOX_INTR;
101 if (events & (EPOLLOUT | EPOLLWRNORM)) {
103 ret = EPOLLOUT | EPOLLWRNORM;
105 ctx->csa.priv1.int_stat_class2_RW &=
106 ~CLASS2_MAILBOX_THRESHOLD_INTR;
107 ctx->csa.priv1.int_mask_class2_RW |=
108 CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
111 spin_unlock_irq(&ctx->csa.register_lock);
115 static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
119 spin_lock(&ctx->csa.register_lock);
120 if (ctx->csa.prob.mb_stat_R & 0xff0000) {
121 /* Read the first available word.
122 * Implementation note: the depth
123 * of puint_mb_R is currently 1.
125 *data = ctx->csa.priv2.puint_mb_R;
126 ctx->csa.prob.mb_stat_R &= ~(0xff0000);
127 ctx->csa.spu_chnlcnt_RW[30] = 1;
128 gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
131 /* make sure we get woken up by the interrupt */
132 ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
135 spin_unlock(&ctx->csa.register_lock);
139 static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
143 spin_lock(&ctx->csa.register_lock);
144 if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
145 int slot = ctx->csa.spu_chnlcnt_RW[29];
146 int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
148 /* We have space to write wbox_data.
149 * Implementation note: the depth
150 * of spu_mb_W is currently 4.
152 BUG_ON(avail != (4 - slot));
153 ctx->csa.spu_mailbox_data[slot] = data;
154 ctx->csa.spu_chnlcnt_RW[29] = ++slot;
155 ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
156 ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
157 gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
160 /* make sure we get woken up by the interrupt when space
162 ctx->csa.priv1.int_mask_class2_RW |=
163 CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
166 spin_unlock(&ctx->csa.register_lock);
170 static u32 spu_backing_signal1_read(struct spu_context *ctx)
172 return ctx->csa.spu_chnldata_RW[3];
175 static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
177 spin_lock(&ctx->csa.register_lock);
178 if (ctx->csa.priv2.spu_cfg_RW & 0x1)
179 ctx->csa.spu_chnldata_RW[3] |= data;
181 ctx->csa.spu_chnldata_RW[3] = data;
182 ctx->csa.spu_chnlcnt_RW[3] = 1;
183 gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
184 spin_unlock(&ctx->csa.register_lock);
187 static u32 spu_backing_signal2_read(struct spu_context *ctx)
189 return ctx->csa.spu_chnldata_RW[4];
192 static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
194 spin_lock(&ctx->csa.register_lock);
195 if (ctx->csa.priv2.spu_cfg_RW & 0x2)
196 ctx->csa.spu_chnldata_RW[4] |= data;
198 ctx->csa.spu_chnldata_RW[4] = data;
199 ctx->csa.spu_chnlcnt_RW[4] = 1;
200 gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
201 spin_unlock(&ctx->csa.register_lock);
204 static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
208 spin_lock(&ctx->csa.register_lock);
209 tmp = ctx->csa.priv2.spu_cfg_RW;
214 ctx->csa.priv2.spu_cfg_RW = tmp;
215 spin_unlock(&ctx->csa.register_lock);
218 static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
220 return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
223 static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
227 spin_lock(&ctx->csa.register_lock);
228 tmp = ctx->csa.priv2.spu_cfg_RW;
233 ctx->csa.priv2.spu_cfg_RW = tmp;
234 spin_unlock(&ctx->csa.register_lock);
237 static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
239 return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
242 static u32 spu_backing_npc_read(struct spu_context *ctx)
244 return ctx->csa.prob.spu_npc_RW;
247 static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
249 ctx->csa.prob.spu_npc_RW = val;
252 static u32 spu_backing_status_read(struct spu_context *ctx)
254 return ctx->csa.prob.spu_status_R;
257 static char *spu_backing_get_ls(struct spu_context *ctx)
259 return ctx->csa.lscsa->ls;
262 static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
264 ctx->csa.priv2.spu_privcntl_RW = val;
267 static u32 spu_backing_runcntl_read(struct spu_context *ctx)
269 return ctx->csa.prob.spu_runcntl_RW;
272 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
274 spin_lock(&ctx->csa.register_lock);
275 ctx->csa.prob.spu_runcntl_RW = val;
276 if (val & SPU_RUNCNTL_RUNNABLE) {
277 ctx->csa.prob.spu_status_R &=
278 ~SPU_STATUS_STOPPED_BY_STOP &
279 ~SPU_STATUS_STOPPED_BY_HALT &
280 ~SPU_STATUS_SINGLE_STEP &
281 ~SPU_STATUS_INVALID_INSTR &
282 ~SPU_STATUS_INVALID_CH;
283 ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
285 ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
287 spin_unlock(&ctx->csa.register_lock);
290 static void spu_backing_runcntl_stop(struct spu_context *ctx)
292 spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
295 static void spu_backing_master_start(struct spu_context *ctx)
297 struct spu_state *csa = &ctx->csa;
300 spin_lock(&csa->register_lock);
301 sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
302 csa->priv1.mfc_sr1_RW = sr1;
303 spin_unlock(&csa->register_lock);
306 static void spu_backing_master_stop(struct spu_context *ctx)
308 struct spu_state *csa = &ctx->csa;
311 spin_lock(&csa->register_lock);
312 sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
313 csa->priv1.mfc_sr1_RW = sr1;
314 spin_unlock(&csa->register_lock);
317 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
320 struct spu_problem_collapsed *prob = &ctx->csa.prob;
323 spin_lock(&ctx->csa.register_lock);
325 if (prob->dma_querytype_RW)
328 /* FIXME: what are the side-effects of this? */
329 prob->dma_querymask_RW = mask;
330 prob->dma_querytype_RW = mode;
331 /* In the current implementation, the SPU context is always
332 * acquired in runnable state when new bits are added to the
333 * mask (tagwait), so it's sufficient just to mask
334 * dma_tagstatus_R with the 'mask' parameter here.
336 ctx->csa.prob.dma_tagstatus_R &= mask;
338 spin_unlock(&ctx->csa.register_lock);
343 static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx)
345 return ctx->csa.prob.dma_tagstatus_R;
348 static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
350 return ctx->csa.prob.dma_qstatus_R;
353 static int spu_backing_send_mfc_command(struct spu_context *ctx,
354 struct mfc_dma_command *cmd)
358 spin_lock(&ctx->csa.register_lock);
360 /* FIXME: set up priv2->puq */
361 spin_unlock(&ctx->csa.register_lock);
366 static void spu_backing_restart_dma(struct spu_context *ctx)
368 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
371 struct spu_context_ops spu_backing_ops = {
372 .mbox_read = spu_backing_mbox_read,
373 .mbox_stat_read = spu_backing_mbox_stat_read,
374 .mbox_stat_poll = spu_backing_mbox_stat_poll,
375 .ibox_read = spu_backing_ibox_read,
376 .wbox_write = spu_backing_wbox_write,
377 .signal1_read = spu_backing_signal1_read,
378 .signal1_write = spu_backing_signal1_write,
379 .signal2_read = spu_backing_signal2_read,
380 .signal2_write = spu_backing_signal2_write,
381 .signal1_type_set = spu_backing_signal1_type_set,
382 .signal1_type_get = spu_backing_signal1_type_get,
383 .signal2_type_set = spu_backing_signal2_type_set,
384 .signal2_type_get = spu_backing_signal2_type_get,
385 .npc_read = spu_backing_npc_read,
386 .npc_write = spu_backing_npc_write,
387 .status_read = spu_backing_status_read,
388 .get_ls = spu_backing_get_ls,
389 .privcntl_write = spu_backing_privcntl_write,
390 .runcntl_read = spu_backing_runcntl_read,
391 .runcntl_write = spu_backing_runcntl_write,
392 .runcntl_stop = spu_backing_runcntl_stop,
393 .master_start = spu_backing_master_start,
394 .master_stop = spu_backing_master_stop,
395 .set_mfc_query = spu_backing_set_mfc_query,
396 .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
397 .get_mfc_free_elements = spu_backing_get_mfc_free_elements,
398 .send_mfc_command = spu_backing_send_mfc_command,
399 .restart_dma = spu_backing_restart_dma,