2 * Copyright (C) 2006-2007 Michael Buesch <mb@bu3sch.de>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
26 extern int yyparse(void);
30 const char *infile_name;
31 const char *outfile_name;
/* NOTE(review): the fields below belong to the output-operand and
 * code-output structures; the struct headers and several members are
 * missing from this view — confirm against the full file. */
41 unsigned int operand; /* For NORMAL */
42 struct label *label; /* For LABELREF */
52 /* Set to true, if this is a jump instruction.
53 * This is only used when assembling RET to check
54 * whether the previous instruction was a jump or not. */
58 struct out_operand operands[3];
60 /* The absolute address of this instruction.
61 * Only used in resolve_labels(). */
64 const char *labelname; /* only for OUT_LABEL */
65 /* Set to 1, if this is the %start instruction. */
68 struct list_head list;
/* Top-level state threaded through all assembler passes: the target
 * architecture, the %start label (if any), the statement currently
 * being processed (for diagnostics), and the generated output list.
 * NOTE(review): some members are missing from this elided view. */
71 struct assembler_context {
72 /* The architecture version (802.11 core revision) */
75 struct label *start_label;
78 struct statement *cur_stmt;
80 struct list_head output;
/* Statement-iteration helpers: for_each_statement() walks the parsed
 * input statement list (infile.sl); for_each_statement_end() closes the
 * loop and clears ctx->cur_stmt so later diagnostics carry no stale
 * statement.  NOTE(review): continuation lines are elided here. */
84 #define for_each_statement(ctx, s) \
85 list_for_each_entry(s, &infile.sl, list) { \
88 #define for_each_statement_end(ctx, s) \
89 } do { ctx->cur_stmt = NULL; } while (0)
/* Diagnostic core: prints "Assembler <type>", then (when the statement
 * is known) the input file name and line, then the formatted message.
 * asm_error()/asm_warn()/asm_info() below wrap it with a severity tag;
 * asm_error() presumably also aborts in an elided line — TODO confirm. */
91 #define _msg_helper(type, stmt, msg, x...) do { \
92 fprintf(stderr, "Assembler " type); \
94 fprintf(stderr, " (file \"%s\", line %u)", \
98 fprintf(stderr, ":\n " msg "\n" ,##x); \
101 #define asm_error(ctx, msg, x...) do { \
102 _msg_helper("ERROR", (ctx)->cur_stmt, msg ,##x); \
106 #define asm_warn(ctx, msg, x...) \
107 _msg_helper("warning", (ctx)->cur_stmt, msg ,##x)
109 #define asm_info(ctx, msg, x...) \
110 _msg_helper("info", (ctx)->cur_stmt, msg ,##x)
/* First pass: evaluate assembler directives from the statement list.
 * Requires exactly one %arch (only versions 5 and 15 are accepted) and
 * allows at most one %start; a missing %start defaults to address 0.
 * NOTE(review): several lines of this function are elided. */
113 static void eval_directives(struct assembler_context *ctx)
118 int have_start_label = 0;
121 for_each_statement(ctx, s) {
122 if (s->type == STMT_ASMDIR) {
/* Duplicate %arch is fatal; an unsupported version is fatal too. */
127 asm_error(ctx, "Multiple %%arch definitions");
128 ctx->arch = ad->u.arch;
129 if (ctx->arch != 5 && ctx->arch != 15) {
130 asm_error(ctx, "Architecture version %u unsupported",
/* %start: remember the label to jump to from offset 0. */
136 if (have_start_label)
137 asm_error(ctx, "Multiple %%start definitions");
138 ctx->start_label = ad->u.start;
139 have_start_label = 1;
142 asm_error(ctx, "Unknown ASM directive");
145 } for_each_statement_end(ctx, s);
148 asm_error(ctx, "No %%arch defined");
149 if (!have_start_label)
150 asm_info(ctx, "Using start address 0");
/* Check whether @imm is representable in the 16-bit machine word at
 * all, allowing values written with implicit sign extension (the bits
 * above bit 15 must then all match the sign bit).
 * NOTE(review): the mask setup and return statements are elided here —
 * presumably mask covers the bits above the low 16; verify in full file. */
153 static bool is_possible_imm(unsigned int imm)
157 /* Immediates are only possible up to 16bit (wordsize). */
160 if (imm & (1 << 15)) {
161 if ((imm & mask) != mask &&
165 if ((imm & mask) != 0)
/* Check whether @imm fits a native immediate operand for the current
 * architecture: 10 bits (v5) or 11 bits (v15), sign-extended, after
 * first confirming it fits the 16-bit word at all.
 * NOTE(review): parts of this function are elided. */
172 static bool is_valid_imm(struct assembler_context *ctx,
176 unsigned int immediate_size;
178 /* This function checks if the immediate value is representable
179 * as a native immediate operand.
181 * For v5 architecture the immediate can be 10bit long.
182 * For v15 architecture the immediate can be 11bit long.
184 * The value is sign-extended, so we allow values
185 * of 0xFFFA, for example.
188 if (!is_possible_imm(imm))
192 if (ctx->arch == 5) {
193 immediate_size = 10; /* 10bit */
194 } else if (ctx->arch == 15) {
195 immediate_size = 11; /* 11bit */
197 asm_error(ctx, "Unknown immediate size for arch %u",
201 /* First create a mask with all possible bits for
202 * an immediate value unset. */
203 mask = (~0 << immediate_size) & 0xFFFF;
204 /* Is the sign bit of the immediate set? */
205 if (imm & (1 << (immediate_size - 1))) {
206 /* Yes, so all bits above that must also
207 * be set, otherwise we can't represent this
208 * value in an operand. */
209 if ((imm & mask) != mask)
212 /* All bits above the immediate's size must
/* This checks if the value is nonzero and a power of two. */
static bool is_power_of_two(unsigned int value)
{
	/* A power of two has exactly one bit set, so clearing the
	 * lowest set bit (value & (value - 1)) must yield zero.
	 * The leading "value &&" excludes zero itself. */
	return (value && ((value & (value - 1)) == 0));
}
227 /* This checks if all bits set in the mask are contiguous.
228 * Zero is also considered a contiguous mask. */
229 static bool is_contiguous_bitmask(unsigned int mask)
231 unsigned int low_zeros_mask;
/* NOTE(review): the declaration of is_contiguous and the early return
 * that makes a zero mask "contiguous" (per the comment above) are in
 * elided lines — without that early return, mask==0 would wrap to 0 in
 * the addition below and fail is_power_of_two(). Verify in full file. */
236 /* Turn the lowest zeros of the mask into a bitmask.
237 * Example: 0b00011000 -> 0b00000111 */
238 low_zeros_mask = (mask - 1) & ~mask;
239 /* Adding the low_zeros_mask to the original mask
240 * basically is a bitwise OR operation.
241 * If the original mask was contiguous, we end up with a
242 * contiguous bitmask from bit 0 to the highest bit
243 * set in the original mask. Adding 1 will result in a single
244 * bit set, which is a power of two. */
245 is_contiguous = is_power_of_two(mask + low_zeros_mask + 1);
247 return is_contiguous;
/* Encode an immediate operand into its binary form (format
 * 0b11ii iiii iiii).  Warns when the value does not fit the native
 * immediate width for the current arch (see is_valid_imm()).
 * NOTE(review): the encoding and return lines are elided. */
250 static unsigned int generate_imm_operand(struct assembler_context *ctx,
251 const struct immediate *imm)
253 unsigned int val, tmp;
256 /* format: 0b11ii iiii iiii */
263 if (!is_valid_imm(ctx, tmp)) {
264 asm_warn(ctx, "IMMEDIATE 0x%X (%d) too long "
265 "(> 9 bits + sign). Did you intend to "
266 "use implicit sign extension?",
/* Encode a register operand (GPR, SPR or OFFR) into its binary form.
 * Each register class has its own bit pattern (see the per-case format
 * comments); out-of-range register numbers are fatal.
 * NOTE(review): the switch labels and encodings are partly elided. */
279 static unsigned int generate_reg_operand(struct assembler_context *ctx,
280 const struct registr *reg)
282 unsigned int val = 0;
286 /* format: 0b1011 11rr rrrr */
290 if (reg->nr & ~0x3F) //FIXME 128 regs for v15 arch possible?
291 asm_error(ctx, "GPR-nr too big");
295 /* format: 0b100. .... .... */
299 if (reg->nr & ~0x1FF)
300 asm_error(ctx, "SPR-nr too big");
304 /* format: 0b1000 0110 0rrr */
309 asm_error(ctx, "OFFR-nr too big");
313 asm_error(ctx, "generate_reg_operand() regtype");
/* Encode a memory operand, either DIRECT (11-bit offset,
 * format 0b0mmm mmmm mmmm) or INDIRECT through an offset register
 * (format 0b101r rroo oooo, 6-bit offset).  Oversized offsets only
 * warn (they are truncated in elided code — TODO confirm); a bad
 * offset-register number is fatal. */
319 static unsigned int generate_mem_operand(struct assembler_context *ctx,
320 const struct memory *mem)
322 unsigned int val = 0, off, reg;
326 /* format: 0b0mmm mmmm mmmm */
328 if (off & ~0x7FF) { //FIXME 4096 words for v15 arch possible?
329 asm_warn(ctx, "DIRECT memoffset 0x%X too long (> 11 bits)", off);
335 /* format: 0b101r rroo oooo */
339 //FIXME what about v15 arch?
341 asm_warn(ctx, "INDIRECT memoffset 0x%X too long (> 6 bits)", off);
345 asm_error(ctx, "OFFR-nr too big");
350 asm_error(ctx, "generate_mem_operand() memtype");
356 static void generate_operand(struct assembler_context *ctx,
357 const struct operand *oper,
358 struct out_operand *out)
360 out->type = OUTOPER_NORMAL;
362 switch (oper->type) {
364 out->u.operand = generate_imm_operand(ctx, oper->u.imm);
367 out->u.operand = generate_reg_operand(ctx, oper->u.reg);
370 out->u.operand = generate_mem_operand(ctx, oper->u.mem);
373 out->type = OUTOPER_LABELREF;
374 out->u.label = oper->u.label;
377 out->u.operand = oper->u.addr->addr;
380 out->u.operand = oper->u.raw;
383 asm_error(ctx, "generate_operand() operstate");
/* Low-level assembly of one instruction: allocate a code_output,
 * store the opcode, encode every operand, enforce the hardware rule
 * that at most one input operand (slots 0/1) may be an SPR and at most
 * one a MEMORY reference, and append the result to ctx->output.
 * Returns the new code_output so callers can set flags on it.
 * NOTE(review): the "in on instruction" message below is a typo for
 * "in one instruction" — a runtime string, left untouched here. */
387 static struct code_output * do_assemble_insn(struct assembler_context *ctx,
388 struct instruction *insn,
395 struct code_output *out;
396 struct label *labelref = NULL;
397 struct operand *oper;
398 int have_spr_operand = 0;
399 int have_mem_operand = 0;
401 out = xmalloc(sizeof(*out));
402 INIT_LIST_HEAD(&out->list);
403 out->opcode = opcode;
/* Sanity check: the output operand array must fit in the parsed
 * operand list. */
406 if (ARRAY_SIZE(out->operands) > ARRAY_SIZE(ol->oper))
407 asm_error(ctx, "Internal operand array confusion");
409 for (i = 0; i < ARRAY_SIZE(out->operands); i++) {
414 /* If this is an INPUT operand (first or second), we must
415 * make sure that not both are accessing SPR or MEMORY.
416 * The device only supports one SPR or MEMORY operand in
417 * the input operands. */
418 if ((i == 0) || (i == 1)) {
419 if ((oper->type == OPER_REG) &&
420 (oper->u.reg->type == SPR)) {
421 if (have_spr_operand)
422 asm_error(ctx, "Multiple SPR input operands in one instruction");
423 have_spr_operand = 1;
425 if (oper->type == OPER_MEM) {
426 if (have_mem_operand)
427 asm_error(ctx, "Multiple MEMORY input operands in on instruction");
428 have_mem_operand = 1;
432 generate_operand(ctx, oper, &out->operands[i]);
436 asm_error(ctx, "Internal error: nr_oper at "
437 "lowlevel do_assemble_insn");
439 list_add_tail(&out->list, &ctx->output);
/* Fold the leading MASK and SHIFT extension operands (both limited to
 * 4 bits) into the opcode itself, then shift the remaining operands
 * down by two slots so the normal operand path can process them.
 * Returns the merged opcode (return line is elided in this view). */
444 static unsigned int merge_ext_into_opcode(struct assembler_context *ctx,
446 struct instruction *insn)
450 unsigned int mask, shift;
454 mask = ol->oper[0]->u.raw;
456 asm_error(ctx, "opcode MASK extension too big (> 0xF)");
457 shift = ol->oper[1]->u.raw;
459 asm_error(ctx, "opcode SHIFT extension too big (> 0xF)");
460 opcode |= (mask << 4);
/* Drop the two consumed extension operands from the list. */
462 ol->oper[0] = ol->oper[2];
463 ol->oper[1] = ol->oper[3];
464 ol->oper[2] = ol->oper[4];
/* Prepare an external-jump instruction: merge the (max 8-bit) condition
 * value into the opcode, then rebuild the operand list as two fake r0
 * GPR operands followed by the jump target, matching the encoding the
 * hardware expects.  The fake operands are heap-allocated and leaked
 * intentionally (see the "Lazyman" note at end of file). */
469 static unsigned int merge_external_jmp_into_opcode(struct assembler_context *ctx,
471 struct instruction *insn)
473 struct operand *fake;
474 struct registr *fake_reg;
475 struct operand *target;
482 cond = ol->oper[0]->u.imm->imm;
484 asm_error(ctx, "External jump condition value too big (> 0xFF)");
486 target = ol->oper[1];
487 memset(ol->oper, 0, sizeof(ol->oper));
489 /* This instruction has two fake r0 operands
490 * at position 0 and 1. */
491 fake = xmalloc(sizeof(*fake));
492 fake_reg = xmalloc(sizeof(*fake_reg));
493 fake->type = OPER_REG;
494 fake->u.reg = fake_reg;
495 fake_reg->type = GPR;
500 ol->oper[2] = target;
/* Forward declaration: the emulate_*() helpers below recurse into the
 * main instruction assembler. */
505 static void assemble_instruction(struct assembler_context *ctx,
506 struct instruction *insn);
/* Emulate the MOV pseudo-op.  A MOV with a register or small immediate
 * source becomes a plain OR; an immediate too wide for OR is split into
 * high byte (shifted via mask/shift raw operands) and low byte and
 * emitted as ORX.  All emulation operands live on the stack and are
 * only used for the recursive assemble_instruction() call. */
508 static void emulate_mov_insn(struct assembler_context *ctx,
509 struct instruction *insn)
511 struct instruction em_insn;
512 struct operlist em_ol;
513 struct operand em_op_shift;
514 struct operand em_op_mask;
515 struct operand em_op_x;
516 struct operand em_op_y;
517 struct immediate em_imm_x;
518 struct immediate em_imm_y;
520 struct operand *in, *out;
523 /* This is a pseudo-OP. We emulate it by OR or ORX */
525 in = insn->operands->oper[0];
526 out = insn->operands->oper[1];
531 em_op_x.type = OPER_IMM;
532 em_op_x.u.imm = &em_imm_x;
533 em_ol.oper[1] = &em_op_x;
536 if (in->type == OPER_IMM) {
537 tmp = in->u.imm->imm;
538 if (!is_possible_imm(tmp))
539 asm_error(ctx, "MOV operand 0x%X > 16bit", tmp);
540 if (!is_valid_imm(ctx, tmp)) {
541 /* Immediate too big for plain OR */
/* ORX path: raw mask 0x7 / shift 0x8 select byte lanes. */
544 em_op_mask.type = OPER_RAW;
545 em_op_mask.u.raw = 0x7;
546 em_op_shift.type = OPER_RAW;
547 em_op_shift.u.raw = 0x8;
549 em_imm_x.imm = (tmp & 0xFF00) >> 8;
550 em_op_x.type = OPER_IMM;
551 em_op_x.u.imm = &em_imm_x;
553 em_imm_y.imm = (tmp & 0x00FF);
554 em_op_y.type = OPER_IMM;
555 em_op_y.u.imm = &em_imm_y;
557 em_ol.oper[0] = &em_op_mask;
558 em_ol.oper[1] = &em_op_shift;
559 em_ol.oper[2] = &em_op_x;
560 em_ol.oper[3] = &em_op_y;
565 em_insn.operands = &em_ol;
566 assemble_instruction(ctx, &em_insn); /* recurse */
/* Emulate the unconditional JMP pseudo-op as "JE imm, imm, target":
 * comparing the same immediate operand with itself is always equal,
 * so the jump is always taken. */
569 static void emulate_jmp_insn(struct assembler_context *ctx,
570 struct instruction *insn)
572 struct instruction em_insn;
573 struct operlist em_ol;
574 struct operand em_op;
575 struct immediate em_imm;
577 /* This is a pseudo-OP. We emulate it by JE */
581 em_op.type = OPER_IMM;
582 em_op.u.imm = &em_imm;
583 em_ol.oper[0] = &em_op;
584 em_ol.oper[1] = &em_op;
585 em_ol.oper[2] = insn->operands->oper[0];
586 em_insn.operands = &em_ol;
587 assemble_instruction(ctx, &em_insn); /* recurse */
/* Assemble JAND/JNAND (@inverted selects the JNAND/negated form —
 * TODO confirm parameter name in full file).  When a single immediate
 * operand does not fit the native immediate width, it is emulated via
 * JZX/JNZX, which requires the long bitmask to be contiguous: the mask
 * is converted into a shift (position of first set bit) and a width
 * (distance to the last set bit).  Otherwise a normal JAND opcode
 * (0x040, LSB = inverted flag) is emitted and marked as a jump. */
590 static void emulate_jand_insn(struct assembler_context *ctx,
591 struct instruction *insn,
594 struct code_output *out;
595 struct instruction em_insn;
596 struct operlist em_ol;
597 struct operand em_op_shift;
598 struct operand em_op_mask;
599 struct operand em_op_y;
600 struct immediate em_imm;
602 struct operand *oper0, *oper1, *oper2;
603 struct operand *imm_oper = NULL;
605 int first_bit, last_bit;
607 oper0 = insn->operands->oper[0];
608 oper1 = insn->operands->oper[1];
609 oper2 = insn->operands->oper[2];
/* Identify which input operand (if any) is the immediate; two
 * immediates are handled in an elided branch. */
611 if (oper0->type == OPER_IMM)
613 if (oper1->type == OPER_IMM)
615 if (oper0->type == OPER_IMM && oper1->type == OPER_IMM)
619 /* We have a single immediate operand.
620 * Check if it's representable by a normal JAND insn.
622 tmp = imm_oper->u.imm->imm;
623 if (!is_valid_imm(ctx, tmp)) {
624 /* Nope, this must be emulated by JZX/JNZX */
625 if (!is_contiguous_bitmask(tmp)) {
626 asm_error(ctx, "Long bitmask 0x%X is not contiguous",
/* ffs() is 1-based; derive the shift and the mask width from the
 * first and last set bits of the contiguous mask. */
630 first_bit = ffs(tmp);
631 last_bit = ffs(~(tmp >> (first_bit - 1))) - 1 + first_bit - 1;
636 em_insn.op = OP_JNZX;
637 em_op_shift.type = OPER_RAW;
638 em_op_shift.u.raw = first_bit - 1;
639 em_op_mask.type = OPER_RAW;
640 em_op_mask.u.raw = last_bit - first_bit;
643 em_op_y.type = OPER_IMM;
644 em_op_y.u.imm = &em_imm;
646 em_ol.oper[0] = &em_op_mask;
647 em_ol.oper[1] = &em_op_shift;
648 if (oper0->type != OPER_IMM)
649 em_ol.oper[2] = oper0;
651 em_ol.oper[2] = oper1;
652 em_ol.oper[3] = &em_op_y;
653 em_ol.oper[4] = oper2;
655 em_insn.operands = &em_ol;
657 assemble_instruction(ctx, &em_insn); /* recurse */
662 /* Do a normal JAND/JNAND instruction */
664 out = do_assemble_insn(ctx, insn, 0x040 | 0x1);
666 out = do_assemble_insn(ctx, insn, 0x040);
667 out->is_jump_insn = 1;
/* Main instruction dispatcher: map each parsed opcode to its hardware
 * opcode value and emit it via do_assemble_insn().  Pseudo-ops (MOV,
 * JMP, JAND/JNAND) are delegated to the emulate_*() helpers; the
 * *X instruction forms merge mask/shift extensions into the opcode
 * first; all conditional jumps mark their output with is_jump_insn so
 * RET can later detect a forbidden jump/RET sequence.
 * NOTE(review): the switch-case labels are elided in this view, so the
 * opcode-to-mnemonic pairing below is positional, not verified. */
670 static void assemble_instruction(struct assembler_context *ctx,
671 struct instruction *insn)
673 struct code_output *out;
678 do_assemble_insn(ctx, insn, 0x1C0);
681 do_assemble_insn(ctx, insn, 0x1C2);
684 do_assemble_insn(ctx, insn, 0x1C1);
687 do_assemble_insn(ctx, insn, 0x1C3);
690 do_assemble_insn(ctx, insn, 0x1D0);
693 do_assemble_insn(ctx, insn, 0x1D2);
696 do_assemble_insn(ctx, insn, 0x1D1);
699 do_assemble_insn(ctx, insn, 0x1D3);
702 do_assemble_insn(ctx, insn, 0x130);
705 do_assemble_insn(ctx, insn, 0x160);
708 do_assemble_insn(ctx, insn, 0x140);
711 do_assemble_insn(ctx, insn, 0x170);
714 do_assemble_insn(ctx, insn, 0x120);
717 opcode = merge_ext_into_opcode(ctx, 0x200, insn);
718 do_assemble_insn(ctx, insn, opcode);
721 do_assemble_insn(ctx, insn, 0x110);
724 do_assemble_insn(ctx, insn, 0x1A0);
727 do_assemble_insn(ctx, insn, 0x1B0);
730 do_assemble_insn(ctx, insn, 0x150);
733 opcode = merge_ext_into_opcode(ctx, 0x300, insn);
734 do_assemble_insn(ctx, insn, opcode);
737 emulate_mov_insn(ctx, insn);
740 emulate_jmp_insn(ctx, insn);
743 emulate_jand_insn(ctx, insn, 0);
746 emulate_jand_insn(ctx, insn, 1);
/* Conditional jumps: base opcode, LSB selects the negated form. */
749 out = do_assemble_insn(ctx, insn, 0x050);
750 out->is_jump_insn = 1;
753 out = do_assemble_insn(ctx, insn, 0x050 | 0x1);
754 out->is_jump_insn = 1;
757 out = do_assemble_insn(ctx, insn, 0x0D0);
758 out->is_jump_insn = 1;
761 out = do_assemble_insn(ctx, insn, 0x0D0 | 0x1);
762 out->is_jump_insn = 1;
765 out = do_assemble_insn(ctx, insn, 0x0D2);
766 out->is_jump_insn = 1;
769 out = do_assemble_insn(ctx, insn, 0x0D2 | 0x1);
770 out->is_jump_insn = 1;
773 out = do_assemble_insn(ctx, insn, 0x0D4);
774 out->is_jump_insn = 1;
777 out = do_assemble_insn(ctx, insn, 0x0D4 | 0x1);
778 out->is_jump_insn = 1;
781 out = do_assemble_insn(ctx, insn, 0x0DA);
782 out->is_jump_insn = 1;
785 out = do_assemble_insn(ctx, insn, 0x0DA | 0x1);
786 out->is_jump_insn = 1;
789 out = do_assemble_insn(ctx, insn, 0x0DC);
792 out = do_assemble_insn(ctx, insn, 0x0DC | 0x1);
793 out->is_jump_insn = 1;
796 opcode = merge_ext_into_opcode(ctx, 0x400, insn);
797 out = do_assemble_insn(ctx, insn, opcode);
798 out->is_jump_insn = 1;
801 opcode = merge_ext_into_opcode(ctx, 0x500, insn);
802 out = do_assemble_insn(ctx, insn, opcode);
803 out->is_jump_insn = 1;
806 opcode = merge_external_jmp_into_opcode(ctx, 0x700, insn);
807 out = do_assemble_insn(ctx, insn, opcode);
808 out->is_jump_insn = 1;
811 opcode = merge_external_jmp_into_opcode(ctx, 0x600, insn);
812 out = do_assemble_insn(ctx, insn, opcode);
813 out->is_jump_insn = 1;
816 do_assemble_insn(ctx, insn, 0x002);
/* RET (0x003): the hardware misbehaves when RET directly follows a
 * jump, so check the previously emitted instruction first. */
819 if (!list_empty(&ctx->output)) {
820 /* Get the previous instruction and check whether it
821 * is a jump instruction. */
822 out = list_entry(ctx->output.prev, struct code_output, list);
823 if (out->is_jump_insn) {
824 asm_error(ctx, "RET instruction directly after "
825 "jump instruction. The hardware won't like this.");
828 do_assemble_insn(ctx, insn, 0x003);
834 do_assemble_insn(ctx, insn, 0x1E0);
837 do_assemble_insn(ctx, insn, 0x001);
840 do_assemble_insn(ctx, insn, insn->opcode);
843 asm_error(ctx, "Unknown op");
/* Pass 2: walk all statements and emit code.  If %start was given,
 * first synthesize a jump to the start label at offset 0 and tag the
 * emitted instruction as is_start_insn (resolve_labels() may optimize
 * it away).  Instruction statements are assembled; label statements
 * become OUT_LABEL placeholder nodes in the output list so label
 * references can be resolved by position later. */
847 static void assemble_instructions(struct assembler_context *ctx)
850 struct instruction *insn;
851 struct code_output *out;
853 if (ctx->start_label) {
854 /* Generate a jump instruction at offset 0 to
855 * jump to the code start.
857 struct instruction sjmp;
861 oper.type = OPER_LABEL;
862 oper.u.label = ctx->start_label;
867 assemble_instruction(ctx, &sjmp);
868 out = list_entry(ctx->output.next, struct code_output, list);
869 out->is_start_insn = 1;
872 for_each_statement(ctx, s) {
877 assemble_instruction(ctx, insn);
880 out = xmalloc(sizeof(*out));
881 INIT_LIST_HEAD(&out->list);
882 out->type = OUT_LABEL;
883 out->labelname = s->u.label->name;
885 list_add_tail(&out->list, &ctx->output);
890 } for_each_statement_end(ctx, s);
893 /* Resolve a label reference to the address it points to. */
/* Three search strategies depending on the reference direction:
 * ABSOLUTE scans the whole output list (and errors on duplicates),
 * RELATIVE_BACK walks backwards from this instruction, and
 * RELATIVE_FORWARD walks forwards; the first matching OUT_LABEL wins.
 * NOTE(review): the not-found return path is elided in this view —
 * resolve_labels() apparently treats a sentinel return as "label does
 * not exist"; confirm in the full file. */
894 static int get_labeladdress(struct assembler_context *ctx,
895 struct code_output *this_insn,
896 struct label *labelref)
898 struct code_output *c;
902 switch (labelref->direction) {
903 case LABELREF_ABSOLUTE:
904 list_for_each_entry(c, &ctx->output, list) {
905 if (c->type != OUT_LABEL)
907 if (strcmp(c->labelname, labelref->name) != 0)
910 asm_error(ctx, "Ambiguous label reference \"%s\"",
914 address = c->address;
917 case LABELREF_RELATIVE_BACK:
918 for (c = list_entry(this_insn->list.prev, typeof(*c), list);
919 &c->list != &ctx->output;
920 c = list_entry(c->list.prev, typeof(*c), list)) {
921 if (c->type != OUT_LABEL)
923 if (strcmp(c->labelname, labelref->name) == 0) {
925 address = c->address;
930 case LABELREF_RELATIVE_FORWARD:
931 for (c = list_entry(this_insn->list.next, typeof(*c), list);
932 &c->list != &ctx->output;
933 c = list_entry(c->list.next, typeof(*c), list)) {
934 if (c->type != OUT_LABEL)
936 if (strcmp(c->labelname, labelref->name) == 0) {
938 address = c->address;
/* Pass 3: assign absolute addresses to every emitted instruction, then
 * replace symbolic label references with those addresses.  If the
 * synthesized %start jump turns out to target address 001 it is
 * deleted as unnecessary, and the address pass restarts from scratch
 * (the goto) since removing it shifts every following address.
 * Resolved addresses used as immediates get the arch-specific
 * immediate tag bits OR-ed in (0xC00 on v5, shifted on v15). */
948 static void resolve_labels(struct assembler_context *ctx)
950 struct code_output *c;
953 unsigned int current_address;
955 /* Calculate the absolute addresses for each instruction. */
956 recalculate_addresses:
958 list_for_each_entry(c, &ctx->output, list) {
961 c->address = current_address;
965 c->address = current_address;
970 /* Resolve the symbolic label references. */
971 list_for_each_entry(c, &ctx->output, list) {
974 if (c->is_start_insn) {
975 /* If the first %start-jump jumps to 001, we can
976 * optimize it away, as it's unneeded.
979 if (c->operands[i].type != OUTOPER_LABELREF)
980 asm_error(ctx, "Internal error, %%start insn oper 2 not labelref");
981 if (c->operands[i].u.label->direction != LABELREF_ABSOLUTE)
982 asm_error(ctx, "%%start label reference not absolute");
983 addr = get_labeladdress(ctx, c, c->operands[i].u.label);
987 list_del(&c->list); /* Kill it */
988 goto recalculate_addresses;
992 for (i = 0; i < ARRAY_SIZE(c->operands); i++) {
993 if (c->operands[i].type != OUTOPER_LABELREF)
995 addr = get_labeladdress(ctx, c, c->operands[i].u.label);
998 c->operands[i].u.operand = addr;
1000 /* Is not a jump target.
1001 * Make it be an immediate */
1003 c->operands[i].u.operand |= 0xC00;
1004 else if (ctx->arch == 15)
1005 c->operands[i].u.operand |= 0xC00 << 1;
1007 asm_error(ctx, "Internal error: label res imm");
1018 asm_error(ctx, "Label \"%s\" does not exist",
1019 c->operands[i].u.label->name);
/* Final pass: write the "<outfile>.ucode" file.  Emits a firmware
 * header (type/version/size, size in big-endian) followed by one
 * 8-byte record per instruction.  Each instruction is packed as
 * opcode + three operands — operand fields are 12 bits wide on v5 and
 * 13 bits on v15 — then the two 32-bit halves are swapped and the
 * whole 64-bit word is serialized big-endian byte by byte.
 * Warns when the instruction count exceeds the device limit. */
1022 static void emit_code(struct assembler_context *ctx)
1027 struct code_output *c;
1029 unsigned char outbuf[8];
1030 unsigned int insn_count = 0;
1031 struct fw_header hdr;
1033 fn_len = strlen(outfile_name) + 20;
1034 fn = xmalloc(fn_len);
1035 snprintf(fn, fn_len, "%s.ucode", outfile_name);
1036 fd = fopen(fn, "w+");
1038 fprintf(stderr, "Could not open microcode output file \"%s\"\n", fn);
1042 if (IS_VERBOSE_DEBUG)
1043 fprintf(stderr, "\nCode:\n");
/* First walk: count instructions (counting code is elided here). */
1045 list_for_each_entry(c, &ctx->output, list) {
1055 memset(&hdr, 0, sizeof(hdr));
1056 hdr.type = FW_TYPE_UCODE;
1057 hdr.ver = FW_HDR_VER;
1058 hdr.size = cpu_to_be32(8 * insn_count);
1059 if (fwrite(&hdr, sizeof(hdr), 1, fd) != 1) {
1060 fprintf(stderr, "Could not write microcode outfile\n");
1064 if (insn_count > NUM_INSN_LIMIT)
1065 asm_warn(ctx, "Generating more than %d instructions. This "
1066 "will overflow the device microcode memory.",
/* Second walk: encode and write each instruction. */
1069 list_for_each_entry(c, &ctx->output, list) {
1072 if (IS_VERBOSE_DEBUG) {
1073 fprintf(stderr, "%03X %03X,%03X,%03X\n",
1075 c->operands[0].u.operand,
1076 c->operands[1].u.operand,
1077 c->operands[2].u.operand);
1081 if (ctx->arch == 5) {
1082 /* Instruction binary format is: xxyyyzzz0000oooX
1085 * Xxx is the first operand
1086 * yyy is the second operand
1087 * zzz is the third operand
1089 code |= ((uint64_t)c->operands[2].u.operand);
1090 code |= ((uint64_t)c->operands[1].u.operand) << 12;
1091 code |= ((uint64_t)c->operands[0].u.operand) << 24;
1092 code |= ((uint64_t)c->opcode) << 36;
1093 code = ((code & (uint64_t)0xFFFFFFFF00000000ULL) >> 32) |
1094 ((code & (uint64_t)0x00000000FFFFFFFFULL) << 32);
1095 } else if (ctx->arch == 15) {
1096 code |= ((uint64_t)c->operands[2].u.operand);
1097 code |= ((uint64_t)c->operands[1].u.operand) << 13;
1098 code |= ((uint64_t)c->operands[0].u.operand) << 26;
1099 code |= ((uint64_t)c->opcode) << 39;
1100 code = ((code & (uint64_t)0xFFFFFFFF00000000ULL) >> 32) |
1101 ((code & (uint64_t)0x00000000FFFFFFFFULL) << 32);
1103 asm_error(ctx, "No emit format for arch %u",
/* Serialize the 64-bit word most-significant byte first. */
1106 outbuf[0] = (code & (uint64_t)0xFF00000000000000ULL) >> 56;
1107 outbuf[1] = (code & (uint64_t)0x00FF000000000000ULL) >> 48;
1108 outbuf[2] = (code & (uint64_t)0x0000FF0000000000ULL) >> 40;
1109 outbuf[3] = (code & (uint64_t)0x000000FF00000000ULL) >> 32;
1110 outbuf[4] = (code & (uint64_t)0x00000000FF000000ULL) >> 24;
1111 outbuf[5] = (code & (uint64_t)0x0000000000FF0000ULL) >> 16;
1112 outbuf[6] = (code & (uint64_t)0x000000000000FF00ULL) >> 8;
1113 outbuf[7] = (code & (uint64_t)0x00000000000000FFULL) >> 0;
1115 if (fwrite(&outbuf, ARRAY_SIZE(outbuf), 1, fd) != 1) {
1116 fprintf(stderr, "Could not write microcode outfile\n");
/* Run all assembler passes in order on a zero-initialized context:
 * directives, code generation, label resolution (and, in an elided
 * line, presumably emit_code() — TODO confirm). */
1128 static void assemble(void)
1130 struct assembler_context ctx;
1132 memset(&ctx, 0, sizeof(ctx));
1133 INIT_LIST_HEAD(&ctx.output);
1135 eval_directives(&ctx);
1136 assemble_instructions(&ctx);
1137 resolve_labels(&ctx);
/* One-time setup: initialize the parser's input lists and, when insane
 * debugging is requested, enable yacc debug output (elided, guarded by
 * YYDEBUG). */
1141 static void initialize(void)
1143 INIT_LIST_HEAD(&infile.sl);
1144 INIT_LIST_HEAD(&infile.ivals);
1146 if (IS_INSANE_DEBUG)
1150 #endif /* YYDEBUG */
/* Entry point: parse command-line arguments, open the input file,
 * assemble the initvals, and (in elided lines) run the main assembly.
 * Error handling between steps is elided in this view. */
1153 int main(int argc, char **argv)
1157 err = parse_args(argc, argv);
1164 err = open_input_file();
1170 assemble_initvals();
1174 /* Lazyman simply leaks all allocated memory. */