2 * Copyright (C) 2006-2010 Michael Buesch <mb@bu3sch.de>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
26 extern int yyparse(void);
30 const char *infile_name;
31 const char *outfile_name;
41 unsigned int operand; /* For NORMAL */
42 struct label *label; /* For LABELREF */
52 /* Set to true, if this is a jump instruction.
53 * This is only used when assembling RET to check
54 * whether the previous instruction was a jump or not. */
58 struct out_operand operands[3];
60 /* The absolute address of this instruction.
61 * Only used in resolve_labels(). */
64 const char *labelname; /* only for OUT_LABEL */
65 /* Set to 1, if this is the %start instruction. */
68 struct list_head list;
71 struct assembler_context {
72 /* The architecture version (802.11 core revision) */
75 struct label *start_label;
78 struct statement *cur_stmt;
80 struct list_head output;
/* Iterate over every statement parsed from the input file (global "infile").
 * NOTE(review): interior lines (orig. 86-87) are missing from this excerpt;
 * they presumably record the current statement in ctx->cur_stmt for the
 * diagnostic macros below -- confirm against the full source. */
84 #define for_each_statement(ctx, s) \
85 list_for_each_entry(s, &infile.sl, list) { \
/* Close the loop body opened above and clear the current-statement pointer. */
88 #define for_each_statement_end(ctx, s) \
89 } do { ctx->cur_stmt = NULL; } while (0)
/* Diagnostic helpers: print an assembler message of the given type to stderr,
 * with file/line context taken from the current statement.  Uses GCC's named
 * variadic macro extension (x...) and ,##x so an empty argument list is legal.
 * NOTE(review): the lines guarding against a NULL stmt (orig. 93, 95-97) are
 * missing from this excerpt. */
91 #define _msg_helper(type, stmt, msg, x...) do { \
92 fprintf(stderr, "Assembler " type); \
94 fprintf(stderr, " (file \"%s\", line %u)", \
98 fprintf(stderr, ":\n " msg "\n" ,##x); \
/* asm_error(): report and abort assembly (termination lines not visible here
 * -- presumably exit(); confirm against the full source). */
101 #define asm_error(ctx, msg, x...) do { \
102 _msg_helper("ERROR", (ctx)->cur_stmt, msg ,##x); \
/* asm_warn()/asm_info(): report and continue. */
106 #define asm_warn(ctx, msg, x...) \
107 _msg_helper("warning", (ctx)->cur_stmt, msg ,##x)
109 #define asm_info(ctx, msg, x...) \
110 _msg_helper("info", (ctx)->cur_stmt, msg ,##x)
/*
 * eval_directives - Evaluate the %arch and %start assembler directives.
 *
 * Walks all parsed statements once, records the firmware architecture
 * version and the optional %start label in ctx, and diagnoses duplicate
 * or unknown directives.  Only %arch 5 and %arch 15 are accepted; core
 * revisions 6..14 are mapped to a fallback architecture with a warning.
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
113 static void eval_directives(struct assembler_context *ctx)
118 int have_start_label = 0;
120 unsigned int arch_fallback = 0;
122 for_each_statement(ctx, s) {
123 if (s->type == STMT_ASMDIR) {
128 asm_error(ctx, "Multiple %%arch definitions");
129 ctx->arch = ad->u.arch;
/* Core revisions strictly between 5 and 15 use a fallback firmware arch. */
130 if (ctx->arch > 5 && ctx->arch < 15)
135 asm_warn(ctx, "Using %%arch %d is incorrect. "
136 "The wireless core revision %d uses the "
137 "firmware architecture %d. So use %%arch %d",
138 ctx->arch, ctx->arch, arch_fallback, arch_fallback);
139 ctx->arch = arch_fallback;
/* After fallback mapping, only architectures 5 and 15 are supported. */
141 if (ctx->arch != 5 && ctx->arch != 15) {
142 asm_error(ctx, "Architecture version %u unsupported",
148 if (have_start_label)
149 asm_error(ctx, "Multiple %%start definitions");
150 ctx->start_label = ad->u.start;
151 have_start_label = 1;
154 asm_error(ctx, "Unknown ASM directive");
157 } for_each_statement_end(ctx, s);
/* %arch is mandatory; %start is optional and defaults to address 0. */
160 asm_error(ctx, "No %%arch defined");
161 if (!have_start_label)
162 asm_info(ctx, "Using start address 0");
/*
 * is_possible_imm - Check whether a value can be a 16bit immediate at all.
 * A value with bit 15 set must have all higher bits either all-set (a
 * sign-extended negative) or behave consistently with the 16bit word size.
 * NOTE(review): the definition of "mask" is in lines missing from this
 * excerpt -- presumably ~0xFFFF or similar; confirm against the full source.
 */
165 static bool is_possible_imm(unsigned int imm)
169 /* Immediates are only possible up to 16bit (wordsize). */
172 if (imm & (1 << 15)) {
173 if ((imm & mask) != mask &&
177 if ((imm & mask) != 0)
184 static unsigned int immediate_nr_bits(struct assembler_context *ctx)
188 return 10; /* 10 bits */
190 return 11; /* 11 bits */
192 asm_error(ctx, "Internal error: immediate_nr_bits unknown arch\n");
/*
 * is_valid_imm - Check whether imm is representable as a native immediate
 * operand for the current architecture (10 bits on v5, 11 bits on v15,
 * sign-extended).  (Excerpt is missing interior lines; code preserved as-is.)
 */
195 static bool is_valid_imm(struct assembler_context *ctx,
199 unsigned int immediate_size;
201 /* This function checks if the immediate value is representable
202 * as a native immediate operand.
204 * For v5 architecture the immediate can be 10bit long.
205 * For v15 architecture the immediate can be 11bit long.
207 * The value is sign-extended, so we allow values
208 * of 0xFFFA, for example.
/* Must at least fit into the 16bit machine word. */
211 if (!is_possible_imm(imm))
215 immediate_size = immediate_nr_bits(ctx);
217 /* First create a mask with all possible bits for
218 * an immediate value unset. */
219 mask = (~0 << immediate_size) & 0xFFFF;
220 /* Is the sign bit of the immediate set? */
221 if (imm & (1 << (immediate_size - 1))) {
222 /* Yes, so all bits above that must also
223 * be set, otherwise we can't represent this
224 * value in an operand. */
225 if ((imm & mask) != mask)
228 /* All bits above the immediate's size must
237 /* This checks if the value is nonzero and a power of two. */
238 static bool is_power_of_two(unsigned int value)
/* value & (value - 1) clears the lowest set bit; the result is zero
 * exactly when at most one bit was set. */
240 return (value && ((value & (value - 1)) == 0));
243 /* This checks if all bits set in the mask are contiguous.
244 * Zero is also considered a contiguous mask. */
245 static bool is_contiguous_bitmask(unsigned int mask)
247 unsigned int low_zeros_mask;
252 /* Turn the lowest zeros of the mask into a bitmask.
253 * Example: 0b00011000 -> 0b00000111 */
254 low_zeros_mask = (mask - 1) & ~mask;
255 /* Adding the low_zeros_mask to the original mask
256 * basically is a bitwise OR operation.
257 * If the original mask was contiguous, we end up with a
258 * contiguous bitmask from bit 0 to the highest bit
259 * set in the original mask. Adding 1 will result in a single
260 * bit set, which is a power of two. */
261 is_contiguous = is_power_of_two(mask + low_zeros_mask + 1);
263 return is_contiguous;
/*
 * generate_imm_operand - Encode an immediate into its operand bit pattern.
 * Over-wide immediates are only warned about (not rejected), because the
 * programmer may rely on the hardware's implicit sign extension.
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
266 static unsigned int generate_imm_operand(struct assembler_context *ctx,
267 const struct immediate *imm)
269 unsigned int val, tmp;
277 if (!is_valid_imm(ctx, tmp)) {
278 asm_warn(ctx, "IMMEDIATE 0x%X (%d) too long "
279 "(> %u bits + sign). Did you intend to "
280 "use implicit sign extension?",
281 tmp, (int)tmp, immediate_nr_bits(ctx) - 1);
/*
 * generate_reg_operand - Encode a register reference into its operand
 * bit pattern.  Range-checks the register number per register class:
 * GPRs allow 6 bits, SPRs 9 bits.  (Excerpt is missing interior lines,
 * including the OFFR range check; code preserved as-is.)
 */
293 static unsigned int generate_reg_operand(struct assembler_context *ctx,
294 const struct registr *reg)
296 unsigned int val = 0;
/* GPR numbers are limited to 0..63. */
303 if (reg->nr & ~0x3F) /* REVISIT: 128 regs for v15 arch possible? Probably not... */
304 asm_error(ctx, "GPR-nr too big");
/* SPR numbers are limited to 0..511. */
311 if (reg->nr & ~0x1FF)
312 asm_error(ctx, "SPR-nr too big");
320 asm_error(ctx, "OFFR-nr too big");
/* Unknown register type: internal assembler error. */
324 asm_error(ctx, "generate_reg_operand() regtype");
330 static unsigned int generate_mem_operand(struct assembler_context *ctx,
331 const struct memory *mem)
333 unsigned int val = 0, off, reg, off_mask, reg_shift;
341 asm_warn(ctx, "DIRECT memoffset 0x%X too long (> 11 bits)", off);
347 asm_warn(ctx, "DIRECT memoffset 0x%X too long (> 12 bits)", off);
352 asm_error(ctx, "Internal error: generate_mem_operand invalid arch");
369 asm_error(ctx, "Internal error: MEM_INDIRECT invalid arch\n");
374 if (off & ~off_mask) {
375 asm_warn(ctx, "INDIRECT memoffset 0x%X too long (> %u bits)",
380 /* Assembler bug. The parser shouldn't pass this value. */
381 asm_error(ctx, "OFFR-nr too big");
384 asm_warn(ctx, "Using offset register 6. This register is broken "
385 "on certain devices. Use off0 to off5 only.");
388 val |= (reg << reg_shift);
391 asm_error(ctx, "generate_mem_operand() memtype");
/*
 * generate_operand - Translate one parsed operand into its output form.
 * Most operand kinds are resolved to a numeric bit pattern immediately;
 * label references cannot be resolved yet and are recorded as
 * OUTOPER_LABELREF for resolve_labels() to patch later.
 * (Excerpt is missing the case labels; code preserved as-is.)
 */
397 static void generate_operand(struct assembler_context *ctx,
398 const struct operand *oper,
399 struct out_operand *out)
/* Default: fully resolved numeric operand; the label case overrides this. */
401 out->type = OUTOPER_NORMAL;
403 switch (oper->type) {
405 out->u.operand = generate_imm_operand(ctx, oper->u.imm);
408 out->u.operand = generate_reg_operand(ctx, oper->u.reg);
411 out->u.operand = generate_mem_operand(ctx, oper->u.mem);
414 out->type = OUTOPER_LABELREF;
415 out->u.label = oper->u.label;
418 out->u.operand = oper->u.addr->addr;
421 out->u.operand = oper->u.raw;
424 asm_error(ctx, "generate_operand() operstate");
/*
 * do_assemble_insn - Low-level assembly of one instruction.
 *
 * Allocates a code_output record, stores the opcode, translates up to
 * three operands via generate_operand(), enforces the hardware limit of
 * at most one SPR and one MEMORY operand among the two input operands,
 * and appends the record to ctx->output.  Returns the new record so the
 * caller can tag it (e.g. is_jump_insn).
 * (Excerpt is missing interior lines; code otherwise preserved as-is.)
 */
428 static struct code_output * do_assemble_insn(struct assembler_context *ctx,
429 struct instruction *insn,
436 struct code_output *out;
437 struct label *labelref = NULL;
438 struct operand *oper;
439 int have_spr_operand = 0;
440 int have_mem_operand = 0;
442 out = xmalloc(sizeof(*out));
443 INIT_LIST_HEAD(&out->list);
444 out->opcode = opcode;
447 if (ARRAY_SIZE(out->operands) > ARRAY_SIZE(ol->oper))
448 asm_error(ctx, "Internal operand array confusion");
450 for (i = 0; i < ARRAY_SIZE(out->operands); i++) {
455 /* If this is an INPUT operand (first or second), we must
456 * make sure that not both are accessing SPR or MEMORY.
457 * The device only supports one SPR or MEMORY operand in
458 * the input operands. */
459 if ((i == 0) || (i == 1)) {
460 if ((oper->type == OPER_REG) &&
461 (oper->u.reg->type == SPR)) {
462 if (have_spr_operand)
463 asm_error(ctx, "Multiple SPR input operands in one instruction");
464 have_spr_operand = 1;
466 if (oper->type == OPER_MEM) {
467 if (have_mem_operand)
/* Fixed typo in the error message: "in on" -> "in one",
 * matching the parallel SPR message above. */
468 asm_error(ctx, "Multiple MEMORY input operands in one instruction");
469 have_mem_operand = 1;
473 generate_operand(ctx, oper, &out->operands[i]);
477 asm_error(ctx, "Internal error: nr_oper at "
478 "lowlevel do_assemble_insn");
480 list_add_tail(&out->list, &ctx->output);
/*
 * do_assemble_ret - Assemble a RET, warning if it directly follows a jump.
 * Scans the output list backwards for the most recent instruction and
 * checks its is_jump_insn flag before delegating to do_assemble_insn().
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
485 static void do_assemble_ret(struct assembler_context *ctx,
486 struct instruction *insn,
489 struct code_output *out;
491 /* Get the previous instruction and check whether it
492 * is a jump instruction. */
493 list_for_each_entry_reverse(out, &ctx->output, list) {
494 /* Search the last insn. */
495 if (out->type == OUT_INSN) {
496 if (out->is_jump_insn) {
497 asm_warn(ctx, "RET instruction directly after "
498 "jump instruction. The hardware won't like this.");
503 do_assemble_insn(ctx, insn, opcode);
/*
 * merge_ext_into_opcode - Fold the MASK and SHIFT extension operands
 * (operands 0 and 1, each limited to 4 bits) into the opcode itself,
 * then shift the remaining operands down so the instruction can be
 * assembled as a normal three-operand insn.  Returns the merged opcode.
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
506 static unsigned int merge_ext_into_opcode(struct assembler_context *ctx,
508 struct instruction *insn)
512 unsigned int mask, shift;
516 mask = ol->oper[0]->u.raw;
518 asm_error(ctx, "opcode MASK extension too big (> 0xF)");
519 shift = ol->oper[1]->u.raw;
521 asm_error(ctx, "opcode SHIFT extension too big (> 0xF)");
522 opcode |= (mask << 4);
/* mask/shift are consumed by the opcode; close the gap in the operand list. */
524 ol->oper[0] = ol->oper[2];
525 ol->oper[1] = ol->oper[3];
526 ol->oper[2] = ol->oper[4];
/*
 * merge_external_jmp_into_opcode - Fold an external-jump condition value
 * (operand 0, max 8 bits) into the opcode and rebuild the operand list:
 * two fake r0 register operands at positions 0 and 1, the jump target at
 * position 2.  Returns the merged opcode.
 * NOTE(review): the fake operands are heap-allocated and apparently never
 * freed -- consistent with the file's leak-everything policy (see end of
 * file).  (Excerpt is missing interior lines; code preserved as-is.)
 */
531 static unsigned int merge_external_jmp_into_opcode(struct assembler_context *ctx,
533 struct instruction *insn)
535 struct operand *fake;
536 struct registr *fake_reg;
537 struct operand *target;
544 cond = ol->oper[0]->u.imm->imm;
546 asm_error(ctx, "External jump condition value too big (> 0xFF)");
548 target = ol->oper[1];
549 memset(ol->oper, 0, sizeof(ol->oper));
551 /* This instruction has two fake r0 operands
552 * at position 0 and 1. */
553 fake = xmalloc(sizeof(*fake));
554 fake_reg = xmalloc(sizeof(*fake_reg));
555 fake->type = OPER_REG;
556 fake->u.reg = fake_reg;
557 fake_reg->type = GPR;
562 ol->oper[2] = target;
567 static void assemble_instruction(struct assembler_context *ctx,
568 struct instruction *insn);
/*
 * emulate_mov_insn - Expand the MOV pseudo-op.
 *
 * A MOV of a small immediate (or a register) becomes a plain OR; an
 * immediate too wide for a native operand becomes an ORX with the value
 * split into a high byte (shifted in) and a low byte.  The emulated
 * instruction is built from stack-allocated operand structures and fed
 * back through assemble_instruction().
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
570 static void emulate_mov_insn(struct assembler_context *ctx,
571 struct instruction *insn)
573 struct instruction em_insn;
574 struct operlist em_ol;
575 struct operand em_op_shift;
576 struct operand em_op_mask;
577 struct operand em_op_x;
578 struct operand em_op_y;
579 struct immediate em_imm_x;
580 struct immediate em_imm_y;
582 struct operand *in, *out;
585 /* This is a pseudo-OP. We emulate it by OR or ORX */
587 in = insn->operands->oper[0];
588 out = insn->operands->oper[1];
593 em_op_x.type = OPER_IMM;
594 em_op_x.u.imm = &em_imm_x;
595 em_ol.oper[1] = &em_op_x;
598 if (in->type == OPER_IMM) {
599 tmp = in->u.imm->imm;
600 if (!is_possible_imm(tmp))
601 asm_error(ctx, "MOV operand 0x%X > 16bit", tmp);
602 if (!is_valid_imm(ctx, tmp)) {
603 /* Immediate too big for plain OR */
/* ORX parameters: mask 0x7, shift 0x8 -> insert the high byte. */
606 em_op_mask.type = OPER_RAW;
607 em_op_mask.u.raw = 0x7;
608 em_op_shift.type = OPER_RAW;
609 em_op_shift.u.raw = 0x8;
611 em_imm_x.imm = (tmp & 0xFF00) >> 8;
612 em_op_x.type = OPER_IMM;
613 em_op_x.u.imm = &em_imm_x;
615 em_imm_y.imm = (tmp & 0x00FF);
616 em_op_y.type = OPER_IMM;
617 em_op_y.u.imm = &em_imm_y;
619 em_ol.oper[0] = &em_op_mask;
620 em_ol.oper[1] = &em_op_shift;
621 em_ol.oper[2] = &em_op_x;
622 em_ol.oper[3] = &em_op_y;
627 em_insn.operands = &em_ol;
628 assemble_instruction(ctx, &em_insn); /* recurse */
/*
 * emulate_jmp_insn - Expand the JMP pseudo-op into JEXT with the
 * always-true external condition (0x7F) and the original jump target.
 */
631 static void emulate_jmp_insn(struct assembler_context *ctx,
632 struct instruction *insn)
634 struct instruction em_insn;
635 struct operlist em_ol;
636 struct immediate em_condition;
637 struct operand em_cond_op;
639 /* This is a pseudo-OP. We emulate it with
640 * JEXT 0x7F, target */
642 em_insn.op = OP_JEXT;
643 em_condition.imm = 0x7F; /* Ext cond: Always true */
644 em_cond_op.type = OPER_IMM;
645 em_cond_op.u.imm = &em_condition;
646 em_ol.oper[0] = &em_cond_op;
647 em_ol.oper[1] = insn->operands->oper[0]; /* Target */
648 em_insn.operands = &em_ol;
650 assemble_instruction(ctx, &em_insn); /* recurse */
/*
 * emulate_jand_insn - Assemble JAND/JNAND, emulating wide bitmasks.
 *
 * If one operand is an immediate too wide for a native operand, and the
 * mask is a contiguous run of bits, the test is rewritten as a JZX/JNZX
 * with explicit shift (position of the lowest set bit) and mask width,
 * then recursively assembled.  Otherwise a normal JAND (0x040) or JNAND
 * (0x040|0x1) is emitted and flagged as a jump instruction.
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
653 static void emulate_jand_insn(struct assembler_context *ctx,
654 struct instruction *insn,
657 struct code_output *out;
658 struct instruction em_insn;
659 struct operlist em_ol;
660 struct operand em_op_shift;
661 struct operand em_op_mask;
662 struct operand em_op_y;
663 struct immediate em_imm;
665 struct operand *oper0, *oper1, *oper2;
666 struct operand *imm_oper = NULL;
668 int first_bit, last_bit;
670 oper0 = insn->operands->oper[0];
671 oper1 = insn->operands->oper[1];
672 oper2 = insn->operands->oper[2];
674 if (oper0->type == OPER_IMM)
676 if (oper1->type == OPER_IMM)
/* Two immediates cannot be emulated this way (handling lines missing here). */
678 if (oper0->type == OPER_IMM && oper1->type == OPER_IMM)
682 /* We have a single immediate operand.
683 * Check if it's representable by a normal JAND insn.
685 tmp = imm_oper->u.imm->imm;
686 if (!is_valid_imm(ctx, tmp)) {
687 /* Nope, this must be emulated by JZX/JNZX */
688 if (!is_contiguous_bitmask(tmp)) {
689 asm_error(ctx, "Long bitmask 0x%X is not contiguous",
/* ffs() is 1-based: first_bit/last_bit bracket the contiguous run. */
693 first_bit = ffs(tmp);
694 last_bit = ffs(~(tmp >> (first_bit - 1))) - 1 + first_bit - 1;
699 em_insn.op = OP_JNZX;
700 em_op_shift.type = OPER_RAW;
701 em_op_shift.u.raw = first_bit - 1;
702 em_op_mask.type = OPER_RAW;
703 em_op_mask.u.raw = last_bit - first_bit;
706 em_op_y.type = OPER_IMM;
707 em_op_y.u.imm = &em_imm;
709 em_ol.oper[0] = &em_op_mask;
710 em_ol.oper[1] = &em_op_shift;
/* The non-immediate operand becomes the value under test. */
711 if (oper0->type != OPER_IMM)
712 em_ol.oper[2] = oper0;
714 em_ol.oper[2] = oper1;
715 em_ol.oper[3] = &em_op_y;
716 em_ol.oper[4] = oper2;
718 em_insn.operands = &em_ol;
720 assemble_instruction(ctx, &em_insn); /* recurse */
725 /* Do a normal JAND/JNAND instruction */
727 out = do_assemble_insn(ctx, insn, 0x040 | 0x1);
729 out = do_assemble_insn(ctx, insn, 0x040);
730 out->is_jump_insn = 1;
/*
 * assemble_instruction - Dispatch one instruction by its opcode class.
 *
 * Maps each parsed op to its hardware opcode and the appropriate
 * assembly path: plain insns via do_assemble_insn(), *X variants via
 * merge_ext_into_opcode(), external jumps via
 * merge_external_jmp_into_opcode(), and the MOV/JMP/JAND pseudo-ops via
 * their emulate_*() helpers.  Jump-class results are tagged
 * is_jump_insn so RET assembly can warn about jump-then-ret sequences.
 * NOTE(review): the case labels and several interior lines (including
 * the is_jump_insn tag after orig. line 852) are missing from this
 * excerpt; code preserved as-is.
 */
733 static void assemble_instruction(struct assembler_context *ctx,
734 struct instruction *insn)
736 struct code_output *out;
741 do_assemble_insn(ctx, insn, 0x1C0);
744 do_assemble_insn(ctx, insn, 0x1C2);
747 do_assemble_insn(ctx, insn, 0x1C1);
750 do_assemble_insn(ctx, insn, 0x1C3);
753 do_assemble_insn(ctx, insn, 0x1D0);
756 do_assemble_insn(ctx, insn, 0x1D2);
759 do_assemble_insn(ctx, insn, 0x1D1);
762 do_assemble_insn(ctx, insn, 0x1D3);
765 do_assemble_insn(ctx, insn, 0x130);
768 do_assemble_insn(ctx, insn, 0x160);
771 do_assemble_insn(ctx, insn, 0x140);
774 do_assemble_insn(ctx, insn, 0x170);
777 do_assemble_insn(ctx, insn, 0x120);
780 opcode = merge_ext_into_opcode(ctx, 0x200, insn);
781 do_assemble_insn(ctx, insn, opcode);
784 do_assemble_insn(ctx, insn, 0x110);
787 do_assemble_insn(ctx, insn, 0x1A0);
790 do_assemble_insn(ctx, insn, 0x1B0);
793 do_assemble_insn(ctx, insn, 0x150);
796 opcode = merge_ext_into_opcode(ctx, 0x300, insn);
797 do_assemble_insn(ctx, insn, opcode);
800 emulate_mov_insn(ctx, insn);
803 emulate_jmp_insn(ctx, insn);
806 emulate_jand_insn(ctx, insn, 0);
809 emulate_jand_insn(ctx, insn, 1);
/* Conditional jumps: base opcode, with |0x1 selecting the negated form. */
812 out = do_assemble_insn(ctx, insn, 0x050);
813 out->is_jump_insn = 1;
816 out = do_assemble_insn(ctx, insn, 0x050 | 0x1);
817 out->is_jump_insn = 1;
820 out = do_assemble_insn(ctx, insn, 0x0D0);
821 out->is_jump_insn = 1;
824 out = do_assemble_insn(ctx, insn, 0x0D0 | 0x1);
825 out->is_jump_insn = 1;
828 out = do_assemble_insn(ctx, insn, 0x0D2);
829 out->is_jump_insn = 1;
832 out = do_assemble_insn(ctx, insn, 0x0D2 | 0x1);
833 out->is_jump_insn = 1;
836 out = do_assemble_insn(ctx, insn, 0x0D4);
837 out->is_jump_insn = 1;
840 out = do_assemble_insn(ctx, insn, 0x0D4 | 0x1);
841 out->is_jump_insn = 1;
844 out = do_assemble_insn(ctx, insn, 0x0DA);
845 out->is_jump_insn = 1;
848 out = do_assemble_insn(ctx, insn, 0x0DA | 0x1);
849 out->is_jump_insn = 1;
852 out = do_assemble_insn(ctx, insn, 0x0DC);
855 out = do_assemble_insn(ctx, insn, 0x0DC | 0x1);
856 out->is_jump_insn = 1;
859 opcode = merge_ext_into_opcode(ctx, 0x400, insn);
860 out = do_assemble_insn(ctx, insn, opcode);
861 out->is_jump_insn = 1;
864 opcode = merge_ext_into_opcode(ctx, 0x500, insn);
865 out = do_assemble_insn(ctx, insn, opcode);
866 out->is_jump_insn = 1;
869 opcode = merge_external_jmp_into_opcode(ctx, 0x700, insn);
870 out = do_assemble_insn(ctx, insn, opcode);
871 out->is_jump_insn = 1;
874 opcode = merge_external_jmp_into_opcode(ctx, 0x600, insn);
875 out = do_assemble_insn(ctx, insn, opcode);
876 out->is_jump_insn = 1;
/* call/ret are arch-5 only; calls/rets are arch-15 only. */
880 asm_error(ctx, "'call' instruction is only supported on arch 5");
881 do_assemble_insn(ctx, insn, 0x002);
885 asm_error(ctx, "'calls' instruction is only supported on arch 15");
886 do_assemble_insn(ctx, insn, 0x004);
890 asm_error(ctx, "'ret' instruction is only supported on arch 5");
891 do_assemble_ret(ctx, insn, 0x003);
895 asm_error(ctx, "'rets' instruction is only supported on arch 15");
896 do_assemble_insn(ctx, insn, 0x005);
902 do_assemble_insn(ctx, insn, 0x1E0);
905 do_assemble_insn(ctx, insn, 0x001);
/* Raw opcode given directly in the source. */
908 do_assemble_insn(ctx, insn, insn->opcode);
911 asm_error(ctx, "Unknown op");
/*
 * assemble_instructions - Assemble every statement into the output list.
 *
 * If a %start label was given, first emits a jump-to-start at offset 0
 * and tags it is_start_insn (so resolve_labels() may optimize it away).
 * Then walks all statements, assembling instructions and recording
 * label statements as OUT_LABEL markers in the output stream.
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
915 static void assemble_instructions(struct assembler_context *ctx)
918 struct instruction *insn;
919 struct code_output *out;
921 if (ctx->start_label) {
922 /* Generate a jump instruction at offset 0 to
923 * jump to the code start.
925 struct instruction sjmp;
929 oper.type = OPER_LABEL;
930 oper.u.label = ctx->start_label;
935 assemble_instruction(ctx, &sjmp);
/* The jump just emitted is the first entry in the output list. */
936 out = list_entry(ctx->output.next, struct code_output, list);
937 out->is_start_insn = 1;
940 for_each_statement(ctx, s) {
945 assemble_instruction(ctx, insn);
/* Label statement: record a position marker in the output stream. */
948 out = xmalloc(sizeof(*out));
949 INIT_LIST_HEAD(&out->list);
950 out->type = OUT_LABEL;
951 out->labelname = s->u.label->name;
953 list_add_tail(&out->list, &ctx->output);
958 } for_each_statement_end(ctx, s);
961 /* Resolve a label reference to the address it points to. */
/*
 * Search strategy depends on the reference direction: absolute scans the
 * whole output list (rejecting ambiguous duplicates), while relative-back
 * and relative-forward scan from this_insn toward the respective end and
 * take the first match.  Returns the resolved address; the not-found
 * return path is in lines missing from this excerpt (presumably a
 * negative sentinel -- confirm against the full source).
 */
962 static int get_labeladdress(struct assembler_context *ctx,
963 struct code_output *this_insn,
964 struct label *labelref)
966 struct code_output *c;
970 switch (labelref->direction) {
971 case LABELREF_ABSOLUTE:
972 list_for_each_entry(c, &ctx->output, list) {
973 if (c->type != OUT_LABEL)
975 if (strcmp(c->labelname, labelref->name) != 0)
978 asm_error(ctx, "Ambiguous label reference \"%s\"",
982 address = c->address;
985 case LABELREF_RELATIVE_BACK:
986 for (c = list_entry(this_insn->list.prev, typeof(*c), list);
987 &c->list != &ctx->output;
988 c = list_entry(c->list.prev, typeof(*c), list)) {
989 if (c->type != OUT_LABEL)
991 if (strcmp(c->labelname, labelref->name) == 0) {
/* Found the nearest preceding label with this name. */
993 address = c->address;
998 case LABELREF_RELATIVE_FORWARD:
999 for (c = list_entry(this_insn->list.next, typeof(*c), list);
1000 &c->list != &ctx->output;
1001 c = list_entry(c->list.next, typeof(*c), list)) {
1002 if (c->type != OUT_LABEL)
1004 if (strcmp(c->labelname, labelref->name) == 0) {
/* Found the nearest following label with this name. */
1006 address = c->address;
/*
 * resolve_labels - Assign instruction addresses and patch label operands.
 *
 * Pass 1 assigns a sequential absolute address to each output entry.
 * Pass 2 replaces every OUTOPER_LABELREF with the resolved address; a
 * %start jump that targets the very first instruction is deleted and the
 * addresses are recomputed from scratch (goto recalculate_addresses).
 * Non-jump label operands are additionally marked as immediates by
 * OR-ing the arch-specific immediate tag bits.
 * (Excerpt is missing interior lines; code preserved as-is.)
 */
1016 static void resolve_labels(struct assembler_context *ctx)
1018 struct code_output *c;
1021 unsigned int current_address;
1023 /* Calculate the absolute addresses for each instruction. */
1024 recalculate_addresses:
1025 current_address = 0;
1026 list_for_each_entry(c, &ctx->output, list) {
1029 c->address = current_address;
1033 c->address = current_address;
1038 /* Resolve the symbolic label references. */
1039 list_for_each_entry(c, &ctx->output, list) {
1042 if (c->is_start_insn) {
1043 /* If the first %start-jump jumps to 001, we can
1044 * optimize it away, as it's unneeded.
1047 if (c->operands[i].type != OUTOPER_LABELREF)
1048 asm_error(ctx, "Internal error, %%start insn oper 2 not labelref");
1049 if (c->operands[i].u.label->direction != LABELREF_ABSOLUTE)
1050 asm_error(ctx, "%%start label reference not absolute");
1051 addr = get_labeladdress(ctx, c, c->operands[i].u.label);
1053 goto does_not_exist;
/* Deleting the start jump shifts every address; start over. */
1055 list_del(&c->list); /* Kill it */
1056 goto recalculate_addresses;
1060 for (i = 0; i < ARRAY_SIZE(c->operands); i++) {
1061 if (c->operands[i].type != OUTOPER_LABELREF)
1063 addr = get_labeladdress(ctx, c, c->operands[i].u.label);
1065 goto does_not_exist;
1066 c->operands[i].u.operand = addr;
1068 /* Is not a jump target.
1069 * Make it be an immediate */
1071 c->operands[i].u.operand |= 0xC00;
1072 else if (ctx->arch == 15)
1073 c->operands[i].u.operand |= 0xC00 << 1;
1075 asm_error(ctx, "Internal error: label res imm");
1086 asm_error(ctx, "Label \"%s\" does not exist",
1087 c->operands[i].u.label->name);
/*
 * emit_code - Serialize the assembled instruction stream to the outfile.
 *
 * Opens the output file, optionally writes a b43 firmware header
 * (FW_TYPE_UCODE, size in bytes = 8 * insn_count), warns when the
 * instruction count exceeds the device limit, packs each instruction's
 * opcode and three operands into a 64bit word using the arch-specific
 * bit layout (12/24/36-bit shifts on v5, 13/26/39 on v15), and writes it
 * in the requested byte order.  Verbose-debug mode also dumps the code
 * to stdout.  (Excerpt is missing interior lines; code otherwise
 * preserved as-is.)
 */
1090 static void emit_code(struct assembler_context *ctx)
1094 struct code_output *c;
1096 unsigned char outbuf[8];
1097 unsigned int insn_count = 0, insn_count_limit;
1098 struct fw_header hdr;
1101 fd = fopen(fn, "w+");
1103 fprintf(stderr, "Could not open microcode output file \"%s\"\n", fn);
1106 if (IS_VERBOSE_DEBUG)
1107 printf("\nCode:\n");
/* First pass over the output list: count instructions for the header. */
1109 list_for_each_entry(c, &ctx->output, list) {
1119 switch (cmdargs.outformat) {
1125 memset(&hdr, 0, sizeof(hdr));
1126 hdr.type = FW_TYPE_UCODE;
1127 hdr.ver = FW_HDR_VER;
1128 hdr.size = cpu_to_be32(8 * insn_count);
1129 if (fwrite(&hdr, sizeof(hdr), 1, fd) != 1) {
1130 fprintf(stderr, "Could not write microcode outfile\n");
1136 switch (ctx->arch) {
1138 insn_count_limit = NUM_INSN_LIMIT_R5;
1141 insn_count_limit = ~0; //FIXME limit currently unknown.
/* Dropped the trailing \n in the message: _msg_helper already appends one,
 * matching the other asm_error() calls in this file. */
1144 asm_error(ctx, "Internal error: emit_code unknown arch");
1146 if (insn_count > insn_count_limit)
1147 asm_warn(ctx, "Generating more than %u instructions. This "
1148 "will overflow the device microcode memory.",
/* Second pass: pack and write each instruction. */
1151 list_for_each_entry(c, &ctx->output, list) {
1154 if (IS_VERBOSE_DEBUG) {
1155 printf("%03X %04X,%04X,%04X\n",
1157 c->operands[0].u.operand,
1158 c->operands[1].u.operand,
1159 c->operands[2].u.operand);
1162 switch (ctx->arch) {
/* v5 layout: 12 bits per operand, opcode at bit 36. */
1165 code |= ((uint64_t)c->operands[2].u.operand);
1166 code |= ((uint64_t)c->operands[1].u.operand) << 12;
1167 code |= ((uint64_t)c->operands[0].u.operand) << 24;
1168 code |= ((uint64_t)c->opcode) << 36;
/* v15 layout: 13 bits per operand, opcode at bit 39. */
1172 code |= ((uint64_t)c->operands[2].u.operand);
1173 code |= ((uint64_t)c->operands[1].u.operand) << 13;
1174 code |= ((uint64_t)c->operands[0].u.operand) << 26;
1175 code |= ((uint64_t)c->opcode) << 39;
1178 asm_error(ctx, "No emit format for arch %u",
1182 switch (cmdargs.outformat) {
/* Swapped format: exchange the 32bit halves, then emit big-endian. */
1185 code = ((code & (uint64_t)0xFFFFFFFF00000000ULL) >> 32) |
1186 ((code & (uint64_t)0x00000000FFFFFFFFULL) << 32);
1187 outbuf[0] = (code & (uint64_t)0xFF00000000000000ULL) >> 56;
1188 outbuf[1] = (code & (uint64_t)0x00FF000000000000ULL) >> 48;
1189 outbuf[2] = (code & (uint64_t)0x0000FF0000000000ULL) >> 40;
1190 outbuf[3] = (code & (uint64_t)0x000000FF00000000ULL) >> 32;
1191 outbuf[4] = (code & (uint64_t)0x00000000FF000000ULL) >> 24;
1192 outbuf[5] = (code & (uint64_t)0x0000000000FF0000ULL) >> 16;
1193 outbuf[6] = (code & (uint64_t)0x000000000000FF00ULL) >> 8;
1194 outbuf[7] = (code & (uint64_t)0x00000000000000FFULL) >> 0;
/* Little-endian byte order. */
1197 outbuf[7] = (code & (uint64_t)0xFF00000000000000ULL) >> 56;
1198 outbuf[6] = (code & (uint64_t)0x00FF000000000000ULL) >> 48;
1199 outbuf[5] = (code & (uint64_t)0x0000FF0000000000ULL) >> 40;
1200 outbuf[4] = (code & (uint64_t)0x000000FF00000000ULL) >> 32;
1201 outbuf[3] = (code & (uint64_t)0x00000000FF000000ULL) >> 24;
1202 outbuf[2] = (code & (uint64_t)0x0000000000FF0000ULL) >> 16;
1203 outbuf[1] = (code & (uint64_t)0x000000000000FF00ULL) >> 8;
1204 outbuf[0] = (code & (uint64_t)0x00000000000000FFULL) >> 0;
1208 if (fwrite(&outbuf, ARRAY_SIZE(outbuf), 1, fd) != 1) {
1209 fprintf(stderr, "Could not write microcode outfile\n");
1218 if (cmdargs.print_sizes) {
1219 printf("%s: text = %u instructions (%u bytes)\n",
1221 (unsigned int)(insn_count * sizeof(uint64_t)));
/*
 * assemble - Top-level assembly pipeline over a zero-initialized context:
 * directives -> instructions -> label resolution.  NOTE(review): the call
 * emitting the output file (emit_code) is presumably in the lines missing
 * after this excerpt -- confirm against the full source.
 */
1227 static void assemble(void)
1229 struct assembler_context ctx;
1231 memset(&ctx, 0, sizeof(ctx));
1232 INIT_LIST_HEAD(&ctx.output);
1234 eval_directives(&ctx);
1235 assemble_instructions(&ctx);
1236 resolve_labels(&ctx);
/*
 * initialize - Prepare global parser state: the input file's statement
 * and initval lists, plus parser debug tracing when compiled with
 * YYDEBUG and insane-debug mode is enabled.
 */
1240 static void initialize(void)
1242 INIT_LIST_HEAD(&infile.sl)
1243 INIT_LIST_HEAD(&infile.ivals);
1245 if (IS_INSANE_DEBUG)
1249 #endif /* YYDEBUG */
1252 int main(int argc, char **argv)
1256 err = parse_args(argc, argv);
1263 err = open_input_file();
1269 assemble_initvals();
1273 /* Lazyman simply leaks all allocated memory. */