1 /* ------------------------------------------------------------------------- */
2 /* "verbs" : Manages actions and grammar tables; parses the directives */
5 /* Copyright (c) Graham Nelson 1993 - 2016 */
7 /* This file is part of Inform. */
9 /* Inform is free software: you can redistribute it and/or modify */
10 /* it under the terms of the GNU General Public License as published by */
11 /* the Free Software Foundation, either version 3 of the License, or */
12 /* (at your option) any later version. */
14 /* Inform is distributed in the hope that it will be useful, */
15 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17 /* GNU General Public License for more details. */
19 /* You should have received a copy of the GNU General Public License */
20 /* along with Inform. If not, see https://gnu.org/licenses/ */
22 /* ------------------------------------------------------------------------- */
/* Grammar-table format version (GV1 = pre-6.06, GV2 = modern) and the
   symbols-table index of the "Grammar__Version" constant. */
26 int grammar_version_number; /* 1 for pre-Inform 6.06 table format */
27 int32 grammar_version_symbol; /* Index of "Grammar__Version"
28 within symbols table */
30 /* ------------------------------------------------------------------------- */
32 /* ------------------------------------------------------------------------- */
33 /* Array defined below: */
35 /* int32 action_byte_offset[n] The (byte) offset in the Z-machine */
36 /* code area of the ...Sub routine */
37 /* for action n. (NB: This is left */
38 /* blank until the end of the */
39 /* compilation pass.) */
40 /* int32 action_symbol[n] The symbol table index of the n-th */
42 /* ------------------------------------------------------------------------- */
/* Real actions are numbered from 0 upward; fake actions from a high base
   (256 in GV1, 4096 in GV2 -- see make_fake_action below). */
44 int no_actions, /* Number of actions made so far */
45 no_fake_actions; /* Number of fake actions made so far */
47 /* ------------------------------------------------------------------------- */
48 /* Adjectives. (The term "adjective" is traditional; they are mainly */
49 /* prepositions, such as "onto".) */
50 /* ------------------------------------------------------------------------- */
51 /* Arrays defined below: */
53 /* int32 adjectives[n] Byte address of dictionary entry */
54 /* for the nth adjective */
55 /* dict_word adjective_sort_code[n] Dictionary sort code of nth adj */
56 /* ------------------------------------------------------------------------- */
/* Adjective numbers count downwards from 0xff (see make_adjective). */
58 int no_adjectives; /* Number of adjectives made so far */
60 /* ------------------------------------------------------------------------- */
61 /* Verbs. Note that Inform-verbs are not quite the same as English verbs: */
62 /* for example the English verbs "take" and "drop" both normally */
63 /* correspond in a game's dictionary to the same Inform verb. An */
64 /* Inform verb is essentially a list of grammar lines. */
65 /* ------------------------------------------------------------------------- */
66 /* Arrays defined below: */
68 /* verbt Inform_verbs[n] The n-th grammar line sequence: */
69 /* see "header.h" for the definition */
70 /* of the typedef struct verbt */
71 /* int32 grammar_token_routine[n] The byte offset from start of code */
72 /* area of the n-th one */
73 /* ------------------------------------------------------------------------- */
/* no_grammar_token_routines counts GV1 parsing routines only: in GV2 the
   corresponding table is left empty (see make_parsing_routine). */
75 int no_Inform_verbs, /* Number of Inform-verbs made so far */
76 no_grammar_token_routines; /* Number of routines given in tokens */
78 /* ------------------------------------------------------------------------- */
79 /* We keep a list of English verb-words known (e.g. "take" or "eat") and */
80 /* which Inform-verbs they correspond to. (This list is needed for some */
81 /* of the grammar extension operations.) */
82 /* The format of this list is a sequence of variable-length records: */
84 /* Byte offset to start of next record (1 byte) */
85 /* Inform verb number this word corresponds to (2 bytes) */
86 /* The English verb-word (reduced to lower case), null-terminated */
87 /* ------------------------------------------------------------------------- */
/* Records are appended by register_verb(); English_verb_list_top always
   points at the next free byte. */
89 static char *English_verb_list, /* First byte of first record */
90 *English_verb_list_top; /* Next byte free for new record */
92 static int English_verb_list_size; /* Size of the list in bytes
93 (redundant but convenient) */
95 /* ------------------------------------------------------------------------- */
96 /* Arrays used by this file */
97 /* ------------------------------------------------------------------------- */
100 uchar *grammar_lines;
101 int32 grammar_lines_top;
102 int no_grammar_lines, no_grammar_tokens;
104 int32 *action_byte_offset,
106 *grammar_token_routine,
/* NOTE(review): the int32 declarator list above ends with a comma and is
   immediately followed by a 'static' declaration -- entries appear to be
   missing from this excerpt (action_symbol and adjectives are used below,
   e.g. in action_of_name and verbs_allocate_arrays). Verify against the
   full source. */
108 static uchar *adjective_sort_code;
110 /* ------------------------------------------------------------------------- */
111 /* Tracing for compiler maintenance */
112 /* ------------------------------------------------------------------------- */
/* Maintenance trace: print, for each Inform-verb, how many grammar lines
   it currently has. */
114 extern void list_verb_table(void)
116 for (i=0; i<no_Inform_verbs; i++)
117 printf("Verb %2d has %d lines\n", i, Inform_verbs[i].lines);
120 /* ------------------------------------------------------------------------- */
122 /* ------------------------------------------------------------------------- */
/* b: the action's name (used only for tracing); c: the number assigned
   to it. */
124 static void new_action(char *b, int c)
126 /* Called whenever a new action (or fake action) is created (either
127 by using make_action above, or the Fake_Action directive, or by
128 the linker). At present just a hook for some tracing code. */
130 if (printprops_switch)
131 printf("Action '%s' is numbered %d\n",b,c);
134 /* Note that fake actions are numbered from a high base point upwards;
135 real actions are numbered from 0 upward in GV2. */
/* Parse a Fake_Action directive: read a new symbol name, create the
   symbol "<name>__A", assign it the next fake-action number (base 256
   in GV1, 4096 in GV2), and emit debug-file records if requested.
   Errors (with panic-mode recovery) if the name is not a fresh symbol. */
137 extern void make_fake_action(void)
139 char action_sub[MAX_IDENTIFIER_LENGTH+4];
140 debug_location_beginning beginning_debug_location =
141 get_token_location_beginning();
144 if (token_type != SYMBOL_TT)
145 { discard_token_location(beginning_debug_location);
146 ebf_error("new fake action name", token_text);
147 panic_mode_error_recovery(); return;
150 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", token_text);
151 i = symbol_index(action_sub, -1);
/* A known (non-UNKNOWN) "<name>__A" symbol means the name is already
   in use as an action or fake action. */
153 if (!(sflags[i] & UNKNOWN_SFLAG))
154 { discard_token_location(beginning_debug_location);
155 ebf_error("new fake action name", token_text);
156 panic_mode_error_recovery(); return;
159 assign_symbol(i, ((grammar_version_number==1)?256:4096)+no_fake_actions++,
162 new_action(token_text, i);
164 if (debugfile_switch)
165 { debug_file_printf("<fake-action>");
166 debug_file_printf("<identifier>##%s</identifier>", token_text);
167 debug_file_printf("<value>%d</value>", svals[i]);
169 write_debug_locations
170 (get_token_location_end(beginning_debug_location));
172 debug_file_printf("</fake-action>");
/* Build an assembly operand holding the action number for 'name': the
   symbol consulted is "<name>__A". A fake action is returned directly;
   an unknown name is created as a new real action (recording its symbol
   in action_symbol[]) before the operand is returned. */
178 extern assembly_operand action_of_name(char *name)
180 /* Returns the action number of the given name, creating it as a new
181 action name if it isn't already known as such. */
183 char action_sub[MAX_IDENTIFIER_LENGTH+4];
187 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", name);
188 j = symbol_index(action_sub, -1);
190 if (stypes[j] == FAKE_ACTION_T)
194 AO.type = LONG_CONSTANT_OT;
196 set_constant_ot(&AO);
197 sflags[j] |= USED_SFLAG;
/* Unknown symbol: mint a brand-new real action numbered no_actions. */
201 if (sflags[j] & UNKNOWN_SFLAG)
203 if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS);
204 new_action(name, no_actions);
205 action_symbol[no_actions] = j;
206 assign_symbol(j, no_actions++, CONSTANT_T);
207 sflags[j] |= ACTION_SFLAG;
209 sflags[j] |= USED_SFLAG;
213 AO.marker = ACTION_MV;
/* NOTE(review): in module mode a long constant is forced -- presumably
   so the linker can backpatch the value; confirm against full source. */
215 AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT;
216 if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT;
219 AO.type = CONSTANT_OT;
/* End-of-compilation pass: for each action "<name>__A", look up the
   routine "<name>Sub" in the symbols table and record its address in
   action_byte_offset[]. Reports an error if no such routine exists or
   if the symbol is not a routine. */
224 extern void find_the_actions(void)
226 char action_name[MAX_IDENTIFIER_LENGTH+4];
227 char action_sub[MAX_IDENTIFIER_LENGTH+4];
230 for (i=0; i<no_actions; i++) action_byte_offset[i] = 0;
232 for (i=0; i<no_actions; i++)
233 { strcpy(action_name, (char *) symbs[action_symbol[i]]);
234 action_name[strlen(action_name) - 3] = '\0'; /* remove "__A" */
235 strcpy(action_sub, action_name);
236 strcat(action_sub, "Sub");
237 j = symbol_index(action_sub, -1);
238 if (sflags[j] & UNKNOWN_SFLAG)
240 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
243 if (stypes[j] != ROUTINE_T)
245 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
246 error_named_at("-- ...Sub symbol found, but not a routine:", action_sub, slines[j]);
249 { action_byte_offset[i] = svals[j];
250 sflags[j] |= USED_SFLAG;
255 /* ------------------------------------------------------------------------- */
257 /* ------------------------------------------------------------------------- */
259 static int make_adjective(char *English_word)
261 /* Returns adjective number of the English word supplied, creating
262 a new adjective number if need be.
264 Note that (partly for historical reasons) adjectives are numbered
265 from 0xff downwards. (And partly to make them stand out as tokens.)
267 This routine is used only in grammar version 1: the corresponding
268 table is left empty in GV2. */
271 uchar new_sort_code[MAX_DICT_WORD_BYTES];
273 if (no_adjectives >= MAX_ADJECTIVES)
274 memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES);
/* Sort codes give a dictionary-resolution canonical form, so the scan
   below detects a word already present as an adjective. */
276 dictionary_prepare(English_word, new_sort_code);
277 for (i=0; i<no_adjectives; i++)
278 if (compare_sorts(new_sort_code,
279 adjective_sort_code+i*DICT_WORD_BYTES) == 0)
281 adjectives[no_adjectives]
282 = dictionary_add(English_word,8,0,0xff-no_adjectives);
283 copy_sorts(adjective_sort_code+no_adjectives*DICT_WORD_BYTES,
285 return(0xff-no_adjectives++);
288 /* ------------------------------------------------------------------------- */
289 /* Parsing routines. */
290 /* ------------------------------------------------------------------------- */
/* Return the index of routine_address within grammar_token_routine[],
   appending it as a new entry if it has not been seen before. */
292 static int make_parsing_routine(int32 routine_address)
294 /* This routine is used only in grammar version 1: the corresponding
295 table is left empty in GV2. */
298 for (l=0; l<no_grammar_token_routines; l++)
299 if (grammar_token_routine[l] == routine_address)
302 grammar_token_routine[l] = routine_address;
303 return(no_grammar_token_routines++);
306 /* ------------------------------------------------------------------------- */
307 /* The English-verb list. */
308 /* ------------------------------------------------------------------------- */
310 static int find_or_renumber_verb(char *English_verb, int *new_number)
312 /* If new_number is null, returns the Inform-verb number which the
313 * given English verb causes, or -1 if the given verb is not in the
316 /* If new_number is non-null, renumbers the Inform-verb number which
317 * English_verb matches in English_verb_list to account for the case
318 * when we are extending a verb. Returns 0 if successful, or -1 if
319 * the given verb is not in the dictionary (which shouldn't happen as
320 * get_verb has already run) */
/* p walks the variable-length records: p[1],p[2] hold the Inform-verb
   number big-endian, and the verb text starts at p+3. */
324 while (p < English_verb_list_top)
325 { if (strcmp(English_verb, p+3) == 0)
327 { p[1] = (*new_number)/256;
328 p[2] = (*new_number)%256;
331 return(256*((uchar)p[1]))+((uchar)p[2]);
338 static void register_verb(char *English_verb, int number)
340 /* Registers a new English verb as referring to the given Inform-verb
341 number. (See comments above for format of the list.) */
/* Duplicate registrations are an error, not a silent overwrite. */
343 if (find_or_renumber_verb(English_verb, NULL) != -1)
344 { error_named("Two different verb definitions refer to", English_verb);
348 English_verb_list_size += strlen(English_verb)+4;
349 if (English_verb_list_size >= MAX_VERBSPACE)
350 memoryerror("MAX_VERBSPACE", MAX_VERBSPACE);
/* Record layout: [0] total record length (4+strlen covers the length
   byte, two number bytes and the null terminator), [1][2] verb number
   big-endian, [3..] null-terminated verb text. */
352 English_verb_list_top[0] = 4+strlen(English_verb);
353 English_verb_list_top[1] = number/256;
354 English_verb_list_top[2] = number%256;
355 strcpy(English_verb_list_top+3, English_verb);
356 English_verb_list_top += English_verb_list_top[0];
359 static int get_verb(void)
361 /* Look at the last-read token: if it's the name of an English verb
362 understood by Inform, in double-quotes, then return the Inform-verb
363 that word refers to: otherwise give an error and return -1. */
/* Single quotes are accepted here as well as double quotes. */
367 if ((token_type == DQ_TT) || (token_type == SQ_TT))
368 { j = find_or_renumber_verb(token_text, NULL);
370 error_named("There is no previous grammar for the verb",
375 ebf_error("an English verb in quotes", token_text);
380 /* ------------------------------------------------------------------------- */
381 /* Grammar lines for Verb/Extend directives. */
382 /* ------------------------------------------------------------------------- */
384 static int grammar_line(int verbnum, int line)
386 /* Parse a grammar line, to be written into grammar_lines[mark] onward.
388 Syntax: * <token1> ... <token-n> -> <action>
390 is compiled to a table in the form:
392 <action number : word>
393 <token 1> ... <token n> <ENDIT>
395 where <ENDIT> is the byte 15, and each <token> is 3 bytes long.
397 If grammar_version_number is 1, the token holds
401 and otherwise a GV2 token.
403 Return TRUE if grammar continues after the line, FALSE if the
404 directive comes to an end. */
406 int j, bytecode, mark; int32 wordcode;
407 int grammar_token, slash_mode, last_was_slash;
408 int reverse_action, TOKEN_SIZE;
409 debug_location_beginning beginning_debug_location =
410 get_token_location_beginning();
/* A ';' here ends the whole directive; anything other than '*' is a
   syntax error. */
413 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
414 { discard_token_location(beginning_debug_location);
417 if (!((token_type == SEP_TT) && (token_value == TIMES_SEP)))
418 { discard_token_location(beginning_debug_location);
419 ebf_error("'*' divider", token_text);
420 panic_mode_error_recovery();
424 /* Have we run out of lines or token space? */
426 if (line >= MAX_LINES_PER_VERB)
427 { discard_token_location(beginning_debug_location);
428 error("Too many lines of grammar for verb. This maximum is built \
429 into Inform, so suggest rewriting grammar using general parsing routines");
433 /* Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long */
434 /* In Glulx, that's 5*32 + 4 = 164 bytes */
436 mark = grammar_lines_top;
438 if (mark + 100 >= MAX_LINESPACE)
439 { discard_token_location(beginning_debug_location);
440 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
444 if (mark + 165 >= MAX_LINESPACE)
445 { discard_token_location(beginning_debug_location);
446 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
/* Remember where this line starts so the action word can be filled in
   once the '-> action' clause has been read (see below). */
450 Inform_verbs[verbnum].l[line] = mark;
461 grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE;
/* Main token loop: each iteration reads one grammar token (or '/' or
   the terminating '->'). */
466 bytecode = 0; wordcode = 0;
467 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
468 { discard_token_location(beginning_debug_location);
469 ebf_error("'->' clause", token_text);
472 if ((token_type == SEP_TT) && (token_value == ARROW_SEP))
473 { if (last_was_slash && (grammar_token>0))
474 ebf_error("grammar token", token_text);
478 if (!last_was_slash) slash_mode = FALSE;
/* '/' marks alternative prepositions (GV2 only): the previous token's
   byte gets the 0x20 "alternatives follow" bit. */
479 if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP))
480 { if (grammar_version_number == 1)
481 error("'/' can only be used with Library 6/3 or later");
483 ebf_error("grammar token or '->'", token_text);
485 { last_was_slash = TRUE;
487 if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2)
488 error("'/' can only be applied to prepositions");
489 grammar_lines[mark-TOKEN_SIZE] |= 0x20;
493 else last_was_slash = FALSE;
/* Quoted word: a preposition. GV1 stores an adjective number; GV2 a
   dictionary address. */
495 if ((token_type == DQ_TT) || (token_type == SQ_TT))
496 { if (grammar_version_number == 1)
497 bytecode = make_adjective(token_text);
500 wordcode = dictionary_add(token_text, 8, 0, 0);
503 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK))
505 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
507 /* noun = <routine> */
510 if ((token_type != SYMBOL_TT)
511 || (stypes[token_value] != ROUTINE_T))
512 { discard_token_location(beginning_debug_location);
513 ebf_error("routine name after 'noun='", token_text);
514 panic_mode_error_recovery();
517 if (grammar_version_number == 1)
519 = 16 + make_parsing_routine(svals[token_value]);
522 wordcode = svals[token_value];
524 sflags[token_value] |= USED_SFLAG;
528 if (grammar_version_number == 1) bytecode=0;
529 else { bytecode = 1; wordcode = 0; }
/* Built-in token keywords: GV1 uses a single bytecode, GV2 encodes
   (bytecode, wordcode) pairs. */
532 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK))
533 { if (grammar_version_number==1) bytecode=1;
534 else { bytecode=1; wordcode=1; } }
535 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK))
536 { if (grammar_version_number==1) bytecode=2;
537 else { bytecode=1; wordcode=2; } }
538 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK))
539 { if (grammar_version_number==1) bytecode=3;
540 else { bytecode=1; wordcode=3; } }
541 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK))
542 { if (grammar_version_number==1) bytecode=4;
543 else { bytecode=1; wordcode=4; } }
544 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK))
545 { if (grammar_version_number==1) bytecode=5;
546 else { bytecode=1; wordcode=5; } }
547 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK))
548 { if (grammar_version_number==1) bytecode=6;
549 else { bytecode=1; wordcode=6; } }
550 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK))
551 { if (grammar_version_number==1) bytecode=7;
552 else { bytecode=1; wordcode=7; } }
553 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK))
554 { if (grammar_version_number==1) bytecode=8;
555 else { bytecode=1; wordcode=8; } }
556 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK))
557 { if (grammar_version_number==1)
558 error("The 'topic' token is only available if you \
559 are using Library 6/3 or later");
560 else { bytecode=1; wordcode=9; } }
561 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK))
563 /* scope = <routine> */
566 if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP)))
567 { discard_token_location(beginning_debug_location);
568 ebf_error("'=' after 'scope'", token_text);
569 panic_mode_error_recovery();
574 if ((token_type != SYMBOL_TT)
575 || (stypes[token_value] != ROUTINE_T))
576 { discard_token_location(beginning_debug_location);
577 ebf_error("routine name after 'scope='", token_text);
578 panic_mode_error_recovery();
582 if (grammar_version_number == 1)
584 make_parsing_routine(svals[token_value]);
585 else { bytecode = 0x85; wordcode = svals[token_value]; }
586 sflags[token_value] |= USED_SFLAG;
588 else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
589 { discard_token_location(beginning_debug_location);
590 error("'=' is only legal here as 'noun=Routine'");
591 panic_mode_error_recovery();
594 else { /* <attribute> or <general-parsing-routine> tokens */
596 if ((token_type != SYMBOL_TT)
597 || ((stypes[token_value] != ATTRIBUTE_T)
598 && (stypes[token_value] != ROUTINE_T)))
599 { discard_token_location(beginning_debug_location);
600 error_named("No such grammar token as", token_text);
601 panic_mode_error_recovery();
604 if (stypes[token_value]==ATTRIBUTE_T)
605 { if (grammar_version_number == 1)
606 bytecode = 128 + svals[token_value];
607 else { bytecode = 4; wordcode = svals[token_value]; }
610 { if (grammar_version_number == 1)
612 make_parsing_routine(svals[token_value]);
613 else { bytecode = 0x86; wordcode = svals[token_value]; }
615 sflags[token_value] |= USED_SFLAG;
618 grammar_token++; no_grammar_tokens++;
/* GV1 lines hold at most 6 tokens; warn once, on the 7th. */
619 if ((grammar_version_number == 1) && (grammar_token > 6))
620 { if (grammar_token == 7)
621 warning("Grammar line cut short: you can only have up to 6 \
622 tokens in any line (unless you're compiling with library 6/3 or later)");
626 { if (bytecode != 0x42)
627 error("'/' can only be applied to prepositions");
/* Emit the token: 1 byte + 2-byte word (Z-machine) or 4-byte word
   (Glulx, big-endian). */
630 grammar_lines[mark++] = bytecode;
632 grammar_lines[mark++] = wordcode/256;
633 grammar_lines[mark++] = wordcode%256;
636 grammar_lines[mark++] = ((wordcode >> 24) & 0xFF);
637 grammar_lines[mark++] = ((wordcode >> 16) & 0xFF);
638 grammar_lines[mark++] = ((wordcode >> 8) & 0xFF);
639 grammar_lines[mark++] = ((wordcode) & 0xFF);
/* Terminate the token list with the <ENDIT> byte (15). */
645 grammar_lines[mark++] = 15;
646 grammar_lines_top = mark;
/* Read the action name literally, without creating a symbol for it. */
648 dont_enter_into_symbol_table = TRUE;
650 dont_enter_into_symbol_table = FALSE;
652 if (token_type != DQ_TT)
653 { discard_token_location(beginning_debug_location);
654 ebf_error("name of new or existing action", token_text);
655 panic_mode_error_recovery();
659 { assembly_operand AO = action_of_name(token_text);
661 if (j >= ((grammar_version_number==1)?256:4096))
662 error_named("This is a fake action, not a real one:", token_text);
665 reverse_action = FALSE;
667 if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK))
668 { if (grammar_version_number == 1)
669 error("'reverse' actions can only be used with \
670 Library 6/3 or later");
671 reverse_action = TRUE;
673 else put_token_back();
/* Go back and fill in the action word at the start of the line. */
675 mark = Inform_verbs[verbnum].l[line];
677 if (debugfile_switch)
678 { debug_file_printf("<table-entry>");
679 debug_file_printf("<type>grammar line</type>");
680 debug_file_printf("<address>");
681 write_debug_grammar_backpatch(mark);
682 debug_file_printf("</address>");
683 debug_file_printf("<end-address>");
684 write_debug_grammar_backpatch(grammar_lines_top);
685 debug_file_printf("</end-address>");
686 write_debug_locations
687 (get_token_location_end(beginning_debug_location));
688 debug_file_printf("</table-entry>");
694 grammar_lines[mark++] = j/256;
695 grammar_lines[mark++] = j%256;
698 grammar_lines[mark++] = ((j >> 8) & 0xFF);
699 grammar_lines[mark++] = ((j) & 0xFF);
700 grammar_lines[mark++] = (reverse_action ? 1 : 0);
706 /* ------------------------------------------------------------------------- */
707 /* The Verb directive: */
709 /* Verb [meta] "word-1" ... "word-n" | = "existing-English-verb" */
710 /* | <grammar-line-1> ... <g-line-n> */
712 /* ------------------------------------------------------------------------- */
714 extern void make_verb(void)
716 /* Parse an entire Verb ... directive. */
718 int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE;
/* NOTE(review): English_verbs_given[] holds at most 32 words and no
   bound on no_given is checked in the loop below -- verify against the
   full source. */
720 char *English_verbs_given[32]; int no_given = 0, i;
722 directive_keywords.enabled = TRUE;
726 if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK))
727 { meta_verb_flag = TRUE;
/* Collect the quoted English verb words naming this Inform-verb. */
731 while ((token_type == DQ_TT) || (token_type == SQ_TT))
732 { English_verbs_given[no_given++] = token_text;
737 { ebf_error("English verb in quotes", token_text);
738 panic_mode_error_recovery(); return;
/* "Verb ... = "existing-verb";" form: share an existing verb's grammar. */
741 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
742 { verb_equals_form = TRUE;
744 Inform_verb = get_verb();
745 if (Inform_verb == -1) return;
747 if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)))
748 ebf_error("';' after English verb", token_text);
751 { Inform_verb = no_Inform_verbs;
752 if (no_Inform_verbs == MAX_VERBS)
753 memoryerror("MAX_VERBS",MAX_VERBS);
/* Dictionary flags: 0x41 = verb, plus 0x02 for meta verbs; verb numbers
   count down from 0xff (Z-machine) or 0xffff (Glulx). */
756 for (i=0; i<no_given; i++)
757 { dictionary_add(English_verbs_given[i],
758 0x41 + ((meta_verb_flag)?0x02:0x00),
759 (glulx_mode)?(0xffff-Inform_verb):(0xff-Inform_verb), 0);
760 register_verb(English_verbs_given[i], Inform_verb);
763 if (!verb_equals_form)
766 while (grammar_line(no_Inform_verbs, lines++)) ;
767 Inform_verbs[no_Inform_verbs++].lines = --lines;
770 directive_keywords.enabled = FALSE;
773 /* ------------------------------------------------------------------------- */
774 /* The Extend directive: */
776 /* Extend | only "verb-1" ... "verb-n" | <grammar-lines> */
777 /* | "verb" | "replace" */
781 /* ------------------------------------------------------------------------- */
/* Modes for the Extend directive: REPLACE discards the verb's existing
   grammar, FIRST inserts the new lines before the old ones (see the
   shifting in extend_verb), LAST (the default) appends after them. */
783 #define EXTEND_REPLACE 1
784 #define EXTEND_FIRST 2
785 #define EXTEND_LAST 3
787 extern void extend_verb(void)
789 /* Parse an entire Extend ... directive. */
/* NOTE(review): 'l' is compared against -1 below before any visible
   initialization in this excerpt -- presumably set to -1 on a line not
   shown here; verify against the full source. */
791 int Inform_verb = -1, k, l, lines, extend_mode;
793 directive_keywords.enabled = TRUE;
794 directives.enabled = FALSE;
/* "Extend only ..." detaches the listed English words onto a fresh copy
   of the Inform-verb, so only those words get the extended grammar. */
797 if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK))
799 if (no_Inform_verbs == MAX_VERBS)
800 memoryerror("MAX_VERBS", MAX_VERBS);
801 while (get_next_token(),
802 ((token_type == DQ_TT) || (token_type == SQ_TT)))
803 { Inform_verb = get_verb();
804 if (Inform_verb == -1) return;
805 if ((l!=-1) && (Inform_verb!=l))
806 warning_named("Verb disagrees with previous verbs:", token_text);
808 dictionary_set_verb_number(token_text,
809 (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs));
810 /* make call to renumber verb in English_verb_list too */
811 if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1)
812 warning_named("Verb to extend not found in English_verb_list:",
816 /* Copy the old Inform-verb into a new one which the list of
817 English-verbs given have had their dictionary entries modified
820 Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb];
821 Inform_verb = no_Inform_verbs++;
824 { Inform_verb = get_verb();
825 if (Inform_verb == -1) return;
829 /* Inform_verb now contains the number of the Inform-verb to extend... */
831 extend_mode = EXTEND_LAST;
832 if ((token_type == SEP_TT) && (token_value == TIMES_SEP))
836 if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK))
837 extend_mode = EXTEND_REPLACE;
838 if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK))
839 extend_mode = EXTEND_FIRST;
840 if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK))
841 extend_mode = EXTEND_LAST;
844 { ebf_error("'replace', 'last', 'first' or '*'", token_text);
845 extend_mode = EXTEND_LAST;
849 l = Inform_verbs[Inform_verb].lines;
851 if (extend_mode == EXTEND_LAST) lines=l;
/* EXTEND_FIRST shifts the existing line offsets up as new lines are
   read, so the new grammar ends up before the old. */
853 { if (extend_mode == EXTEND_FIRST)
855 Inform_verbs[Inform_verb].l[k+lines]
856 = Inform_verbs[Inform_verb].l[k-1+lines];
857 } while (grammar_line(Inform_verb, lines++));
859 if (extend_mode == EXTEND_FIRST)
860 { Inform_verbs[Inform_verb].lines = l+lines-1;
862 Inform_verbs[Inform_verb].l[k+lines-1]
863 = Inform_verbs[Inform_verb].l[k+lines];
865 else Inform_verbs[Inform_verb].lines = --lines;
867 directive_keywords.enabled = FALSE;
868 directives.enabled = TRUE;
871 /* ========================================================================= */
872 /* Data structure management routines */
873 /* ------------------------------------------------------------------------- */
/* Reset this module's state and array pointers before compilation;
   also choose the grammar-table format (GV1 vs GV2). */
875 extern void init_verbs_vars(void)
879 no_grammar_lines = 0;
880 no_grammar_tokens = 0;
881 English_verb_list_size = 0;
884 action_byte_offset = NULL;
885 grammar_token_routine = NULL;
887 adjective_sort_code = NULL;
888 English_verb_list = NULL;
891 grammar_version_number = 1;
893 grammar_version_number = 2;
/* Reset per-pass counters at the start of each compilation pass. */
896 extern void verbs_begin_pass(void)
898 no_Inform_verbs=0; no_adjectives=0;
899 no_grammar_token_routines=0;
903 grammar_lines_top = 0;
/* Allocate this module's arrays; freed again by verbs_free_arrays. */
906 extern void verbs_allocate_arrays(void)
908 Inform_verbs = my_calloc(sizeof(verbt), MAX_VERBS, "verbs");
909 grammar_lines = my_malloc(MAX_LINESPACE, "grammar lines");
910 action_byte_offset = my_calloc(sizeof(int32), MAX_ACTIONS, "actions");
911 action_symbol = my_calloc(sizeof(int32), MAX_ACTIONS,
/* NOTE(review): grammar_token_routine is sized by MAX_ACTIONS, not by a
   dedicated grammar-routine maximum -- confirm this is intentional. */
913 grammar_token_routine = my_calloc(sizeof(int32), MAX_ACTIONS,
914 "grammar token routines");
915 adjectives = my_calloc(sizeof(int32), MAX_ADJECTIVES,
917 adjective_sort_code = my_calloc(DICT_WORD_BYTES, MAX_ADJECTIVES,
918 "adjective sort codes");
920 English_verb_list = my_malloc(MAX_VERBSPACE, "register of verbs");
921 English_verb_list_top = English_verb_list;
/* Release every array allocated by verbs_allocate_arrays. */
924 extern void verbs_free_arrays(void)
926 my_free(&Inform_verbs, "verbs");
927 my_free(&grammar_lines, "grammar lines");
928 my_free(&action_byte_offset, "actions");
929 my_free(&action_symbol, "action symbols");
930 my_free(&grammar_token_routine, "grammar token routines");
931 my_free(&adjectives, "adjectives");
932 my_free(&adjective_sort_code, "adjective sort codes");
933 my_free(&English_verb_list, "register of verbs");
936 /* ========================================================================= */