2 * Copyright (C) 2006-2010 Michael Buesch <m@bues.ch>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
/* Parser entry point generated by yacc/bison (defined in the grammar unit). */
26 extern int yyparse(void);
/* Input/output file names. NOTE(review): assigned elsewhere,
 * presumably during command-line argument parsing — confirm. */
30 const char *infile_name;
31 const char *outfile_name;
/* Fields of the assembler output records (partial listing).
 * An out_operand holds either a resolved numeric operand or an
 * unresolved label reference; a code_output record is one emitted
 * item (instruction or label) linked into the output list. */
41 unsigned int operand; /* For NORMAL */
42 struct label *label; /* For LABELREF */
52 /* Set to true, if this is a jump instruction.
53 * This is only used when assembling RET to check
54 * whether the previous instruction was a jump or not. */
58 struct out_operand operands[3];
60 /* The absolute address of this instruction.
61 * Only used in resolve_labels(). */
64 const char *labelname; /* only for OUT_LABEL */
65 /* Set to 1, if this is the %start instruction. */
68 struct list_head list;
/* Per-run assembler state: target architecture, the %start label,
 * the statement currently being processed (for diagnostics), and
 * the list of generated code_output records. */
71 struct assembler_context {
72 /* The architecture version (802.11 core revision) */
75 struct label *start_label;
/* Current statement, used by the asm_error/asm_warn macros for
 * file/line diagnostics. */
78 struct statement *cur_stmt;
80 struct list_head output;
/* Iterate over all parsed statements in the input file.
 * Must always be paired with for_each_statement_end, which closes
 * the loop brace and clears ctx->cur_stmt for diagnostics. */
84 #define for_each_statement(ctx, s) \
85 list_for_each_entry(s, &infile.sl, list) { \
88 #define for_each_statement_end(ctx, s) \
89 } do { ctx->cur_stmt = NULL; } while (0)
/* Diagnostic helpers: print "Assembler <type> (file, line): <msg>"
 * to stderr, using the current statement for location info.
 * GNU-style variadic macros (x...) are used throughout. */
91 #define _msg_helper(type, stmt, msg, x...) do { \
92 fprintf(stderr, "Assembler " type); \
94 fprintf(stderr, " (file \"%s\", line %u)", \
98 fprintf(stderr, ":\n " msg "\n" ,##x); \
/* asm_error reports a fatal error (presumably exits — the exit
 * call is not visible in this extract; confirm). */
101 #define asm_error(ctx, msg, x...) do { \
102 _msg_helper("ERROR", (ctx)->cur_stmt, msg ,##x); \
106 #define asm_warn(ctx, msg, x...) \
107 _msg_helper("warning", (ctx)->cur_stmt, msg ,##x)
109 #define asm_info(ctx, msg, x...) \
110 _msg_helper("info", (ctx)->cur_stmt, msg ,##x)
/* Scan all statements for assembler directives (%arch, %start).
 * Records the architecture and start label in ctx, rejecting
 * duplicate definitions and unsupported architecture versions.
 * Arch values in (5, 15) are warned about and mapped to a
 * fallback architecture. */
113 static void eval_directives(struct assembler_context *ctx)
118 int have_start_label = 0;
120 unsigned int arch_fallback = 0;
122 for_each_statement(ctx, s) {
123 if (s->type == STMT_ASMDIR) {
128 asm_error(ctx, "Multiple %%arch definitions");
129 ctx->arch = ad->u.arch;
130 if (ctx->arch > 5 && ctx->arch < 15)
135 asm_warn(ctx, "Using %%arch %d is incorrect. "
136 "The wireless core revision %d uses the "
137 "firmware architecture %d. So use %%arch %d",
138 ctx->arch, ctx->arch, arch_fallback, arch_fallback);
139 ctx->arch = arch_fallback;
/* Only v5 and v15 firmware architectures are supported. */
141 if (ctx->arch != 5 && ctx->arch != 15) {
142 asm_error(ctx, "Architecture version %u unsupported",
148 if (have_start_label)
149 asm_error(ctx, "Multiple %%start definitions");
150 ctx->start_label = ad->u.start;
151 have_start_label = 1;
154 asm_error(ctx, "Unknown ASM directive");
157 } for_each_statement_end(ctx, s);
160 asm_error(ctx, "No %%arch defined");
161 if (!have_start_label)
162 asm_info(ctx, "Using start address 0");
/* Check whether a value fits in a 16bit machine word at all,
 * treating bit 15 as the sign bit: if set, all higher bits must
 * also be set (sign extension); if clear, all higher bits must
 * be zero. */
165 static bool is_possible_imm(unsigned int imm)
169 /* Immediates are only possible up to 16bit (wordsize). */
172 if (imm & (1 << 15)) {
173 if ((imm & mask) != mask &&
177 if ((imm & mask) != 0)
/* Return the native immediate operand width for the target arch:
 * 10 bits on v5, 11 bits on v15.  Any other arch is an internal
 * error (eval_directives only admits 5 and 15). */
184 static unsigned int immediate_nr_bits(struct assembler_context *ctx)
188 return 10; /* 10 bits */
190 return 11; /* 11 bits */
192 asm_error(ctx, "Internal error: immediate_nr_bits unknown arch\n");
/* Check whether `imm` is representable as a native (sign-extended)
 * immediate operand on the current architecture. */
195 static bool is_valid_imm(struct assembler_context *ctx,
199 unsigned int immediate_size;
201 /* This function checks if the immediate value is representable
202 * as a native immediate operand.
204 * For v5 architecture the immediate can be 10bit long.
205 * For v15 architecture the immediate can be 11bit long.
207 * The value is sign-extended, so we allow values
208 * of 0xFFFA, for example.
/* First the value must fit the 16bit word at all. */
211 if (!is_possible_imm(imm))
215 immediate_size = immediate_nr_bits(ctx);
217 /* First create a mask with all possible bits for
218 * an immediate value unset. */
/* NOTE(review): `~0` is a signed int here, so `~0 << n` left-shifts
 * a negative value (technically UB); `~0u` would be cleaner. */
219 mask = (~0 << immediate_size) & 0xFFFF;
220 /* Is the sign bit of the immediate set? */
221 if (imm & (1 << (immediate_size - 1))) {
222 /* Yes, so all bits above that must also
223 * be set, otherwise we can't represent this
224 * value in an operand. */
225 if ((imm & mask) != mask)
228 /* All bits above the immediate's size must
237 /* This checks if the value is nonzero and a power of two. */
238 static bool is_power_of_two(unsigned int value)
/* Classic trick: a power of two has exactly one bit set, so
 * clearing the lowest set bit (value & (value - 1)) yields zero. */
240 return (value && ((value & (value - 1)) == 0));
243 /* This checks if all bits set in the mask are contiguous.
244 * Zero is also considered a contiguous mask. */
245 static bool is_contiguous_bitmask(unsigned int mask)
247 unsigned int low_zeros_mask;
252 /* Turn the lowest zeros of the mask into a bitmask.
253 * Example: 0b00011000 -> 0b00000111 */
254 low_zeros_mask = (mask - 1) & ~mask;
255 /* Adding the low_zeros_mask to the original mask
256 * basically is a bitwise OR operation.
257 * If the original mask was contiguous, we end up with a
258 * contiguous bitmask from bit 0 to the highest bit
259 * set in the original mask. Adding 1 will result in a single
260 * bit set, which is a power of two. */
261 is_contiguous = is_power_of_two(mask + low_zeros_mask + 1);
263 return is_contiguous;
/* Encode an immediate operand into its machine representation.
 * Warns (does not abort) when the value exceeds the arch's native
 * immediate width, since the overflow may be intentional sign
 * extension. */
266 static unsigned int generate_imm_operand(struct assembler_context *ctx,
267 const struct immediate *imm)
269 unsigned int val, tmp;
277 if (!is_valid_imm(ctx, tmp)) {
278 asm_warn(ctx, "IMMEDIATE 0x%X (%d) too long "
279 "(> %u bits + sign). Did you intend to "
280 "use implicit sign extension?",
281 tmp, (int)tmp, immediate_nr_bits(ctx) - 1);
/* Encode a register operand (GPR / SPR / OFFR) into its machine
 * representation, range-checking the register number per type:
 * GPR <= 0x3F, SPR <= 0x1FF. */
293 static unsigned int generate_reg_operand(struct assembler_context *ctx,
294 const struct registr *reg)
296 unsigned int val = 0;
303 if (reg->nr & ~0x3F) /* REVISIT: 128 regs for v15 arch possible? Probably not... */
304 asm_error(ctx, "GPR-nr too big");
311 if (reg->nr & ~0x1FF)
312 asm_error(ctx, "SPR-nr too big");
320 asm_error(ctx, "OFFR-nr too big");
/* Unknown register type: internal assembler error. */
324 asm_error(ctx, "generate_reg_operand() regtype");
/* Encode a memory operand, either DIRECT (offset only; 11 bits on
 * one arch, 12 on the other) or INDIRECT (offset + offset-register).
 * Offset overflow is a warning; a bad offset register is an
 * internal error because the parser should never produce one. */
330 static unsigned int generate_mem_operand(struct assembler_context *ctx,
331 const struct memory *mem)
333 unsigned int val = 0, off, reg, off_mask, reg_shift;
341 asm_warn(ctx, "DIRECT memoffset 0x%X too long (> 11 bits)", off);
347 asm_warn(ctx, "DIRECT memoffset 0x%X too long (> 12 bits)", off);
352 asm_error(ctx, "Internal error: generate_mem_operand invalid arch");
369 asm_error(ctx, "Internal error: MEM_INDIRECT invalid arch\n");
374 if (off & ~off_mask) {
375 asm_warn(ctx, "INDIRECT memoffset 0x%X too long (> %u bits)",
380 /* Assembler bug. The parser shouldn't pass this value. */
381 asm_error(ctx, "OFFR-nr too big");
384 asm_warn(ctx, "Using offset register 6. This register is broken "
385 "on certain devices. Use off0 to off5 only.");
388 val |= (reg << reg_shift);
391 asm_error(ctx, "generate_mem_operand() memtype");
/* Convert a parsed operand into an output operand.  Most operand
 * types resolve to a numeric value immediately (OUTOPER_NORMAL);
 * label references are kept symbolic (OUTOPER_LABELREF) and
 * resolved later in resolve_labels(). */
397 static void generate_operand(struct assembler_context *ctx,
398 const struct operand *oper,
399 struct out_operand *out)
401 out->type = OUTOPER_NORMAL;
403 switch (oper->type) {
405 out->u.operand = generate_imm_operand(ctx, oper->u.imm);
408 out->u.operand = generate_reg_operand(ctx, oper->u.reg);
411 out->u.operand = generate_mem_operand(ctx, oper->u.mem);
414 out->type = OUTOPER_LABELREF;
415 out->u.label = oper->u.label;
418 out->u.operand = oper->u.addr->addr;
421 out->u.operand = oper->u.raw;
424 asm_error(ctx, "generate_operand() operstate");
/* Assemble one instruction into a freshly allocated code_output
 * record, encode its operands, and append it to ctx->output.
 * Enforces the hardware restriction that at most one of the two
 * input operands may access SPR and at most one may access MEMORY.
 * Returns the new record so callers can set flags (is_jump_insn).
 * FIX(review): corrected error-message typo "in on instruction"
 * -> "in one instruction". */
428 static struct code_output * do_assemble_insn(struct assembler_context *ctx,
429 struct instruction *insn,
436 struct code_output *out;
437 struct label *labelref = NULL;
438 struct operand *oper;
439 int have_spr_operand = 0;
440 int have_mem_operand = 0;
442 out = xmalloc(sizeof(*out));
443 INIT_LIST_HEAD(&out->list);
444 out->opcode = opcode;
447 if (ARRAY_SIZE(out->operands) > ARRAY_SIZE(ol->oper))
448 asm_error(ctx, "Internal operand array confusion");
450 for (i = 0; i < ARRAY_SIZE(out->operands); i++) {
455 /* If this is an INPUT operand (first or second), we must
456 * make sure that not both are accessing SPR or MEMORY.
457 * The device only supports one SPR or MEMORY operand in
458 * the input operands. */
459 if ((i == 0) || (i == 1)) {
460 if ((oper->type == OPER_REG) &&
461 (oper->u.reg->type == SPR)) {
462 if (have_spr_operand)
463 asm_error(ctx, "Multiple SPR input operands in one instruction");
464 have_spr_operand = 1;
466 if (oper->type == OPER_MEM) {
467 if (have_mem_operand)
468 asm_error(ctx, "Multiple MEMORY input operands in one instruction");
469 have_mem_operand = 1;
473 generate_operand(ctx, oper, &out->operands[i]);
477 asm_error(ctx, "Internal error: nr_oper at "
478 "lowlevel do_assemble_insn");
480 list_add_tail(&out->list, &ctx->output);
/* Assemble a RET instruction.  Before emitting, walk the output
 * list backwards to find the most recent instruction; if it was a
 * jump, warn, because the hardware misbehaves when RET directly
 * follows a jump. */
485 static void do_assemble_ret(struct assembler_context *ctx,
486 struct instruction *insn,
489 struct code_output *out;
491 /* Get the previous instruction and check whether it
492 * is a jump instruction. */
493 list_for_each_entry_reverse(out, &ctx->output, list) {
494 /* Search the last insn. */
495 if (out->type == OUT_INSN) {
496 if (out->is_jump_insn) {
497 asm_warn(ctx, "RET instruction directly after "
498 "jump instruction. The hardware won't like this.");
503 do_assemble_insn(ctx, insn, opcode);
/* For extended opcodes (e.g. ORX/SRX-style): fold the first two
 * raw operands (a 4-bit MASK and a 4-bit SHIFT extension) into the
 * opcode word, then shift the remaining operands down by two
 * positions so they occupy slots 0..2.  Returns the merged opcode. */
506 static unsigned int merge_ext_into_opcode(struct assembler_context *ctx,
508 struct instruction *insn)
512 unsigned int mask, shift;
516 mask = ol->oper[0]->u.raw;
518 asm_error(ctx, "opcode MASK extension too big (> 0xF)");
519 shift = ol->oper[1]->u.raw;
521 asm_error(ctx, "opcode SHIFT extension too big (> 0xF)");
522 opcode |= (mask << 4);
524 ol->oper[0] = ol->oper[2];
525 ol->oper[1] = ol->oper[3];
526 ol->oper[2] = ol->oper[4];
/* For external-condition jumps (JEXT/JNEXT-style): fold the 8-bit
 * condition immediate into the opcode, then rebuild the operand
 * list as [fake r0, fake r0, jump target] — the hardware encoding
 * requires two dummy register-0 operands before the target.
 * The fake operands are heap-allocated and leaked intentionally
 * (see the "Lazyman simply leaks" note at the end of the file). */
531 static unsigned int merge_external_jmp_into_opcode(struct assembler_context *ctx,
533 struct instruction *insn)
535 struct operand *fake;
536 struct registr *fake_reg;
537 struct operand *target;
544 cond = ol->oper[0]->u.imm->imm;
546 asm_error(ctx, "External jump condition value too big (> 0xFF)");
548 target = ol->oper[1];
549 memset(ol->oper, 0, sizeof(ol->oper));
551 /* This instruction has two fake r0 operands
552 * at position 0 and 1. */
553 fake = xmalloc(sizeof(*fake));
554 fake_reg = xmalloc(sizeof(*fake_reg));
555 fake->type = OPER_REG;
556 fake->u.reg = fake_reg;
557 fake_reg->type = GPR;
562 ol->oper[2] = target;
/* Forward declaration: the emulate_* helpers below recurse into
 * the main instruction assembler. */
567 static void assemble_instruction(struct assembler_context *ctx,
568 struct instruction *insn);
/* Emulate the MOV pseudo-instruction.  A MOV with a small source
 * becomes a plain OR; an immediate too wide for a native operand
 * is split into high byte (with mask/shift raw operands) and low
 * byte and assembled as an ORX.  All emulated operands live on
 * this stack frame — safe because assemble_instruction consumes
 * them before returning. */
570 static void emulate_mov_insn(struct assembler_context *ctx,
571 struct instruction *insn)
573 struct instruction em_insn;
574 struct operlist em_ol;
575 struct operand em_op_shift;
576 struct operand em_op_mask;
577 struct operand em_op_x;
578 struct operand em_op_y;
579 struct immediate em_imm_x;
580 struct immediate em_imm_y;
582 struct operand *in, *out;
585 /* This is a pseudo-OP. We emulate it by OR or ORX */
587 in = insn->operands->oper[0];
588 out = insn->operands->oper[1];
593 em_op_x.type = OPER_IMM;
594 em_op_x.u.imm = &em_imm_x;
595 em_ol.oper[1] = &em_op_x;
598 if (in->type == OPER_IMM) {
599 tmp = in->u.imm->imm;
600 if (!is_possible_imm(tmp))
601 asm_error(ctx, "MOV operand 0x%X > 16bit", tmp);
602 if (!is_valid_imm(ctx, tmp)) {
603 /* Immediate too big for plain OR */
606 em_op_mask.type = OPER_RAW;
607 em_op_mask.u.raw = 0x7;
608 em_op_shift.type = OPER_RAW;
609 em_op_shift.u.raw = 0x8;
/* Split the 16bit value: high byte ... */
611 em_imm_x.imm = (tmp & 0xFF00) >> 8;
612 em_op_x.type = OPER_IMM;
613 em_op_x.u.imm = &em_imm_x;
/* ... and low byte as separate immediates. */
615 em_imm_y.imm = (tmp & 0x00FF);
616 em_op_y.type = OPER_IMM;
617 em_op_y.u.imm = &em_imm_y;
619 em_ol.oper[0] = &em_op_mask;
620 em_ol.oper[1] = &em_op_shift;
621 em_ol.oper[2] = &em_op_x;
622 em_ol.oper[3] = &em_op_y;
627 em_insn.operands = &em_ol;
628 assemble_instruction(ctx, &em_insn); /* recurse */
/* Emulate the unconditional JMP pseudo-instruction as
 * "JEXT 0x7F, target" — external condition 0x7F is always true. */
631 static void emulate_jmp_insn(struct assembler_context *ctx,
632 struct instruction *insn)
634 struct instruction em_insn;
635 struct operlist em_ol;
636 struct immediate em_condition;
637 struct operand em_cond_op;
639 /* This is a pseudo-OP. We emulate it with
640 * JEXT 0x7F, target */
642 em_insn.op = OP_JEXT;
643 em_condition.imm = 0x7F; /* Ext cond: Always true */
644 em_cond_op.type = OPER_IMM;
645 em_cond_op.u.imm = &em_condition;
646 em_ol.oper[0] = &em_cond_op;
647 em_ol.oper[1] = insn->operands->oper[0]; /* Target */
648 em_insn.operands = &em_ol;
650 assemble_instruction(ctx, &em_insn); /* recurse */
/* Assemble JAND/JNAND.  If one operand is an immediate too wide
 * for a native operand, it must be a contiguous bitmask; the jump
 * is then emulated via JZX/JNZX using the mask's first/last bit
 * positions as raw shift/mask extension operands.  Otherwise a
 * normal JAND (opcode 0x040, |1 for the negated form) is emitted
 * and flagged as a jump instruction. */
653 static void emulate_jand_insn(struct assembler_context *ctx,
654 struct instruction *insn,
657 struct code_output *out;
658 struct instruction em_insn;
659 struct operlist em_ol;
660 struct operand em_op_shift;
661 struct operand em_op_mask;
662 struct operand em_op_y;
663 struct immediate em_imm;
665 struct operand *oper0, *oper1, *oper2;
666 struct operand *imm_oper = NULL;
668 int first_bit, last_bit;
670 oper0 = insn->operands->oper[0];
671 oper1 = insn->operands->oper[1];
672 oper2 = insn->operands->oper[2];
674 if (oper0->type == OPER_IMM)
676 if (oper1->type == OPER_IMM)
678 if (oper0->type == OPER_IMM && oper1->type == OPER_IMM)
682 /* We have a single immediate operand.
683 * Check if it's representable by a normal JAND insn.
685 tmp = imm_oper->u.imm->imm;
686 if (!is_valid_imm(ctx, tmp)) {
687 /* Nope, this must be emulated by JZX/JNZX */
688 if (!is_contiguous_bitmask(tmp)) {
689 asm_error(ctx, "Long bitmask 0x%X is not contiguous",
/* ffs() gives the 1-based index of the lowest set bit. */
693 first_bit = ffs(tmp);
694 last_bit = ffs(~(tmp >> (first_bit - 1))) - 1 + first_bit - 1;
699 em_insn.op = OP_JNZX;
700 em_op_shift.type = OPER_RAW;
701 em_op_shift.u.raw = first_bit - 1;
702 em_op_mask.type = OPER_RAW;
703 em_op_mask.u.raw = last_bit - first_bit;
706 em_op_y.type = OPER_IMM;
707 em_op_y.u.imm = &em_imm;
709 em_ol.oper[0] = &em_op_mask;
710 em_ol.oper[1] = &em_op_shift;
/* The non-immediate operand becomes the value being tested. */
711 if (oper0->type != OPER_IMM)
712 em_ol.oper[2] = oper0;
714 em_ol.oper[2] = oper1;
715 em_ol.oper[3] = &em_op_y;
716 em_ol.oper[4] = oper2;
718 em_insn.operands = &em_ol;
720 assemble_instruction(ctx, &em_insn); /* recurse */
725 /* Do a normal JAND/JNAND instruction */
727 out = do_assemble_insn(ctx, insn, 0x040 | 0x1);
729 out = do_assemble_insn(ctx, insn, 0x040);
730 out->is_jump_insn = 1;
/* Main instruction dispatcher: map each parsed opcode to its
 * hardware opcode and assemble it.  Pseudo-ops (MOV, JMP,
 * JAND/JNAND) are delegated to the emulate_* helpers; extended
 * ops merge raw mask/shift operands into the opcode first; all
 * conditional jumps mark the emitted record with is_jump_insn.
 * CALL/RET are v5-only; CALLS/RETS are v15-only. */
733 static void assemble_instruction(struct assembler_context *ctx,
734 struct instruction *insn)
736 struct code_output *out;
741 do_assemble_insn(ctx, insn, 0x101);
744 do_assemble_insn(ctx, insn, 0x1C0);
747 do_assemble_insn(ctx, insn, 0x1C2);
750 do_assemble_insn(ctx, insn, 0x1C1);
753 do_assemble_insn(ctx, insn, 0x1C3);
756 do_assemble_insn(ctx, insn, 0x1D0);
759 do_assemble_insn(ctx, insn, 0x1D2);
762 do_assemble_insn(ctx, insn, 0x1D1);
765 do_assemble_insn(ctx, insn, 0x1D3);
768 do_assemble_insn(ctx, insn, 0x130);
771 do_assemble_insn(ctx, insn, 0x160);
774 do_assemble_insn(ctx, insn, 0x140);
777 do_assemble_insn(ctx, insn, 0x170);
780 do_assemble_insn(ctx, insn, 0x120);
783 opcode = merge_ext_into_opcode(ctx, 0x200, insn);
784 do_assemble_insn(ctx, insn, opcode);
787 do_assemble_insn(ctx, insn, 0x110);
790 do_assemble_insn(ctx, insn, 0x1A0);
793 do_assemble_insn(ctx, insn, 0x1B0);
796 do_assemble_insn(ctx, insn, 0x150);
799 opcode = merge_ext_into_opcode(ctx, 0x300, insn);
800 do_assemble_insn(ctx, insn, opcode);
/* Pseudo-ops emulated in terms of real instructions. */
803 emulate_mov_insn(ctx, insn);
806 emulate_jmp_insn(ctx, insn);
809 emulate_jand_insn(ctx, insn, 0);
812 emulate_jand_insn(ctx, insn, 1);
/* Conditional jumps: base opcode, |1 selects the inverted form. */
815 out = do_assemble_insn(ctx, insn, 0x050);
816 out->is_jump_insn = 1;
819 out = do_assemble_insn(ctx, insn, 0x050 | 0x1);
820 out->is_jump_insn = 1;
823 out = do_assemble_insn(ctx, insn, 0x0D0);
824 out->is_jump_insn = 1;
827 out = do_assemble_insn(ctx, insn, 0x0D0 | 0x1);
828 out->is_jump_insn = 1;
831 out = do_assemble_insn(ctx, insn, 0x0D2);
832 out->is_jump_insn = 1;
835 out = do_assemble_insn(ctx, insn, 0x0D2 | 0x1);
836 out->is_jump_insn = 1;
839 out = do_assemble_insn(ctx, insn, 0x0D4);
840 out->is_jump_insn = 1;
843 out = do_assemble_insn(ctx, insn, 0x0D4 | 0x1);
844 out->is_jump_insn = 1;
847 out = do_assemble_insn(ctx, insn, 0x0DA);
848 out->is_jump_insn = 1;
851 out = do_assemble_insn(ctx, insn, 0x0DA | 0x1);
852 out->is_jump_insn = 1;
855 out = do_assemble_insn(ctx, insn, 0x0DC);
/* NOTE(review): the 0x0DC case above does not set is_jump_insn in
 * this extract, unlike every sibling case — possibly an upstream
 * omission or a line lost in extraction; confirm against full
 * source. */
858 out = do_assemble_insn(ctx, insn, 0x0DC | 0x1);
859 out->is_jump_insn = 1;
862 out = do_assemble_insn(ctx, insn, 0x0D6);
863 out->is_jump_insn = 1;
866 out = do_assemble_insn(ctx, insn, 0x0D6 | 0x1);
867 out->is_jump_insn = 1;
870 out = do_assemble_insn(ctx, insn, 0x0D8);
871 out->is_jump_insn = 1;
874 out = do_assemble_insn(ctx, insn, 0x0D8 | 0x1);
875 out->is_jump_insn = 1;
878 opcode = merge_ext_into_opcode(ctx, 0x400, insn);
879 out = do_assemble_insn(ctx, insn, opcode);
880 out->is_jump_insn = 1;
883 opcode = merge_ext_into_opcode(ctx, 0x500, insn);
884 out = do_assemble_insn(ctx, insn, opcode);
885 out->is_jump_insn = 1;
888 opcode = merge_external_jmp_into_opcode(ctx, 0x700, insn);
889 out = do_assemble_insn(ctx, insn, opcode);
890 out->is_jump_insn = 1;
893 opcode = merge_external_jmp_into_opcode(ctx, 0x600, insn);
894 out = do_assemble_insn(ctx, insn, opcode);
895 out->is_jump_insn = 1;
899 asm_error(ctx, "'call' instruction is only supported on arch 5");
900 do_assemble_insn(ctx, insn, 0x002);
904 asm_error(ctx, "'calls' instruction is only supported on arch 15");
905 do_assemble_insn(ctx, insn, 0x004);
909 asm_error(ctx, "'ret' instruction is only supported on arch 5");
910 do_assemble_ret(ctx, insn, 0x003);
914 asm_error(ctx, "'rets' instruction is only supported on arch 15");
915 do_assemble_insn(ctx, insn, 0x005);
921 do_assemble_insn(ctx, insn, 0x1E0);
924 do_assemble_insn(ctx, insn, 0x001);
/* Raw opcode passthrough for explicit-opcode statements. */
927 do_assemble_insn(ctx, insn, insn->opcode);
930 asm_error(ctx, "Unknown op");
/* Assemble the whole program: if a %start label was given, first
 * synthesize a jump to it at offset 0 and mark that record as the
 * start instruction; then walk all statements, assembling
 * instructions and emitting OUT_LABEL records for labels. */
934 static void assemble_instructions(struct assembler_context *ctx)
937 struct instruction *insn;
938 struct code_output *out;
940 if (ctx->start_label) {
941 /* Generate a jump instruction at offset 0 to
942 * jump to the code start.
944 struct instruction sjmp;
948 oper.type = OPER_LABEL;
949 oper.u.label = ctx->start_label;
954 assemble_instruction(ctx, &sjmp);
/* The synthesized jump is the first entry in the output list. */
955 out = list_entry(ctx->output.next, struct code_output, list);
956 out->is_start_insn = 1;
959 for_each_statement(ctx, s) {
964 assemble_instruction(ctx, insn);
967 out = xmalloc(sizeof(*out));
968 INIT_LIST_HEAD(&out->list);
969 out->type = OUT_LABEL;
970 out->labelname = s->u.label->name;
972 list_add_tail(&out->list, &ctx->output);
977 } for_each_statement_end(ctx, s);
980 /* Resolve a label reference to the address it points to. */
/* Three lookup modes: ABSOLUTE scans the whole output list (and
 * rejects ambiguous duplicates); RELATIVE_BACK walks backwards
 * from this instruction; RELATIVE_FORWARD walks forwards.  The
 * relative searches stop at the first matching label. */
981 static int get_labeladdress(struct assembler_context *ctx,
982 struct code_output *this_insn,
983 struct label *labelref)
985 struct code_output *c;
989 switch (labelref->direction) {
990 case LABELREF_ABSOLUTE:
991 list_for_each_entry(c, &ctx->output, list) {
992 if (c->type != OUT_LABEL)
994 if (strcmp(c->labelname, labelref->name) != 0)
997 asm_error(ctx, "Ambiguous label reference \"%s\"",
1001 address = c->address;
1004 case LABELREF_RELATIVE_BACK:
1005 for (c = list_entry(this_insn->list.prev, typeof(*c), list);
1006 &c->list != &ctx->output;
1007 c = list_entry(c->list.prev, typeof(*c), list)) {
1008 if (c->type != OUT_LABEL)
1010 if (strcmp(c->labelname, labelref->name) == 0) {
1012 address = c->address;
1017 case LABELREF_RELATIVE_FORWARD:
1018 for (c = list_entry(this_insn->list.next, typeof(*c), list);
1019 &c->list != &ctx->output;
1020 c = list_entry(c->list.next, typeof(*c), list)) {
1021 if (c->type != OUT_LABEL)
1023 if (strcmp(c->labelname, labelref->name) == 0) {
1025 address = c->address;
/* Two-pass label resolution: first assign absolute addresses to
 * every output record, then patch all OUTOPER_LABELREF operands
 * with the resolved addresses.  A %start jump that targets the
 * very next instruction is deleted as redundant, which changes
 * all addresses — hence the goto back to recalculation. */
1035 static void resolve_labels(struct assembler_context *ctx)
1037 struct code_output *c;
1040 unsigned int current_address;
1042 /* Calculate the absolute addresses for each instruction. */
1043 recalculate_addresses:
1044 current_address = 0;
1045 list_for_each_entry(c, &ctx->output, list) {
1048 c->address = current_address;
1052 c->address = current_address;
1057 /* Resolve the symbolic label references. */
1058 list_for_each_entry(c, &ctx->output, list) {
1061 if (c->is_start_insn) {
1062 /* If the first %start-jump jumps to 001, we can
1063 * optimize it away, as it's unneeded.
1066 if (c->operands[i].type != OUTOPER_LABELREF)
1067 asm_error(ctx, "Internal error, %%start insn oper 2 not labelref");
1068 if (c->operands[i].u.label->direction != LABELREF_ABSOLUTE)
1069 asm_error(ctx, "%%start label reference not absolute");
1070 addr = get_labeladdress(ctx, c, c->operands[i].u.label);
1072 goto does_not_exist;
1074 list_del(&c->list); /* Kill it */
1075 goto recalculate_addresses;
1079 for (i = 0; i < ARRAY_SIZE(c->operands); i++) {
1080 if (c->operands[i].type != OUTOPER_LABELREF)
1082 addr = get_labeladdress(ctx, c, c->operands[i].u.label);
1084 goto does_not_exist;
1085 c->operands[i].u.operand = addr;
1087 /* Is not a jump target.
1088 * Make it be an immediate */
/* 0xC00 marks the operand as an immediate; the v15 encoding is
 * one bit wider, hence the extra shift. */
1090 c->operands[i].u.operand |= 0xC00;
1091 else if (ctx->arch == 15)
1092 c->operands[i].u.operand |= 0xC00 << 1;
1094 asm_error(ctx, "Internal error: label res imm");
1105 asm_error(ctx, "Label \"%s\" does not exist",
1106 c->operands[i].u.label->name);
/* Write the assembled microcode to the output file.  Optionally
 * prepends a firmware header (size in big-endian), checks the
 * instruction count against the per-arch limit, packs each
 * instruction's opcode + 3 operands into a 64-bit word using the
 * arch-specific bit layout (12-bit operands on v5, 13-bit on
 * v15), and serializes it in the selected byte order. */
1109 static void emit_code(struct assembler_context *ctx)
1113 struct code_output *c;
1115 unsigned char outbuf[8];
1116 unsigned int insn_count = 0, insn_count_limit;
1117 struct fw_header hdr;
1120 fd = fopen(fn, "w+");
1122 fprintf(stderr, "Could not open microcode output file \"%s\"\n", fn);
1125 if (IS_VERBOSE_DEBUG)
1126 printf("\nCode:\n");
1128 list_for_each_entry(c, &ctx->output, list) {
1138 switch (cmdargs.outformat) {
1144 memset(&hdr, 0, sizeof(hdr));
1145 hdr.type = FW_TYPE_UCODE;
1146 hdr.ver = FW_HDR_VER;
/* Each instruction serializes to 8 bytes. */
1147 hdr.size = cpu_to_be32(8 * insn_count);
1148 if (fwrite(&hdr, sizeof(hdr), 1, fd) != 1) {
1149 fprintf(stderr, "Could not write microcode outfile\n");
1155 switch (ctx->arch) {
1157 insn_count_limit = NUM_INSN_LIMIT_R5;
1160 insn_count_limit = ~0; //FIXME limit currently unknown.
1163 asm_error(ctx, "Internal error: emit_code unknown arch\n");
1165 if (insn_count > insn_count_limit)
1166 asm_warn(ctx, "Generating more than %u instructions. This "
1167 "will overflow the device microcode memory.",
1170 list_for_each_entry(c, &ctx->output, list) {
1173 if (IS_VERBOSE_DEBUG) {
1174 printf("%03X %04X,%04X,%04X\n",
1176 c->operands[0].u.operand,
1177 c->operands[1].u.operand,
1178 c->operands[2].u.operand);
1181 switch (ctx->arch) {
/* v5 layout: 12-bit operand fields, opcode at bit 36. */
1184 code |= ((uint64_t)c->operands[2].u.operand);
1185 code |= ((uint64_t)c->operands[1].u.operand) << 12;
1186 code |= ((uint64_t)c->operands[0].u.operand) << 24;
1187 code |= ((uint64_t)c->opcode) << 36;
/* v15 layout: 13-bit operand fields, opcode at bit 39. */
1191 code |= ((uint64_t)c->operands[2].u.operand);
1192 code |= ((uint64_t)c->operands[1].u.operand) << 13;
1193 code |= ((uint64_t)c->operands[0].u.operand) << 26;
1194 code |= ((uint64_t)c->opcode) << 39;
1197 asm_error(ctx, "No emit format for arch %u",
1201 switch (cmdargs.outformat) {
/* Big-endian output with the two 32-bit halves swapped first. */
1204 code = ((code & (uint64_t)0xFFFFFFFF00000000ULL) >> 32) |
1205 ((code & (uint64_t)0x00000000FFFFFFFFULL) << 32);
1206 outbuf[0] = (code & (uint64_t)0xFF00000000000000ULL) >> 56;
1207 outbuf[1] = (code & (uint64_t)0x00FF000000000000ULL) >> 48;
1208 outbuf[2] = (code & (uint64_t)0x0000FF0000000000ULL) >> 40;
1209 outbuf[3] = (code & (uint64_t)0x000000FF00000000ULL) >> 32;
1210 outbuf[4] = (code & (uint64_t)0x00000000FF000000ULL) >> 24;
1211 outbuf[5] = (code & (uint64_t)0x0000000000FF0000ULL) >> 16;
1212 outbuf[6] = (code & (uint64_t)0x000000000000FF00ULL) >> 8;
1213 outbuf[7] = (code & (uint64_t)0x00000000000000FFULL) >> 0;
/* Little-endian output: bytes written in reverse order. */
1216 outbuf[7] = (code & (uint64_t)0xFF00000000000000ULL) >> 56;
1217 outbuf[6] = (code & (uint64_t)0x00FF000000000000ULL) >> 48;
1218 outbuf[5] = (code & (uint64_t)0x0000FF0000000000ULL) >> 40;
1219 outbuf[4] = (code & (uint64_t)0x000000FF00000000ULL) >> 32;
1220 outbuf[3] = (code & (uint64_t)0x00000000FF000000ULL) >> 24;
1221 outbuf[2] = (code & (uint64_t)0x0000000000FF0000ULL) >> 16;
1222 outbuf[1] = (code & (uint64_t)0x000000000000FF00ULL) >> 8;
1223 outbuf[0] = (code & (uint64_t)0x00000000000000FFULL) >> 0;
1227 if (fwrite(&outbuf, ARRAY_SIZE(outbuf), 1, fd) != 1) {
1228 fprintf(stderr, "Could not write microcode outfile\n");
1237 if (cmdargs.print_sizes) {
1238 printf("%s: text = %u instructions (%u bytes)\n",
1240 (unsigned int)(insn_count * sizeof(uint64_t)));
/* Top-level assembly driver: zero-init a fresh context, then run
 * the three phases — directive evaluation, instruction assembly,
 * and label resolution.  (Code emission is presumably invoked
 * after this; the call is not visible in this extract.) */
1246 static void assemble(void)
1248 struct assembler_context ctx;
1250 memset(&ctx, 0, sizeof(ctx));
1251 INIT_LIST_HEAD(&ctx.output);
1253 eval_directives(&ctx);
1254 assemble_instructions(&ctx);
1255 resolve_labels(&ctx);
/* One-time setup: initialize the input-file statement and initval
 * lists, and enable parser debugging in insane-debug builds. */
1259 static void initialize(void)
1261 INIT_LIST_HEAD(&infile.sl);
1262 INIT_LIST_HEAD(&infile.ivals);
1264 if (IS_INSANE_DEBUG)
1268 #endif /* YYDEBUG */
1271 int main(int argc, char **argv)
1275 err = parse_args(argc, argv);
1282 err = open_input_file();
1288 assemble_initvals();
1292 /* Lazyman simply leaks all allocated memory. */