Update to commit e33eef4f8fab800eaf4a32b2d159cde6c4bbb38e
[inform.git] / src / verbs.c
1 /* ------------------------------------------------------------------------- */
2 /*   "verbs" :  Manages actions and grammar tables; parses the directives    */
3 /*              Verb and Extend.                                             */
4 /*                                                                           */
5 /*   Part of Inform 6.35                                                     */
6 /*   copyright (c) Graham Nelson 1993 - 2021                                 */
7 /*                                                                           */
8 /* Inform is free software: you can redistribute it and/or modify            */
9 /* it under the terms of the GNU General Public License as published by      */
10 /* the Free Software Foundation, either version 3 of the License, or         */
11 /* (at your option) any later version.                                       */
12 /*                                                                           */
13 /* Inform is distributed in the hope that it will be useful,                 */
14 /* but WITHOUT ANY WARRANTY; without even the implied warranty of            */
15 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              */
16 /* GNU General Public License for more details.                              */
17 /*                                                                           */
18 /* You should have received a copy of the GNU General Public License         */
19 /* along with Inform. If not, see https://gnu.org/licenses/                  */
20 /*                                                                           */
21 /* ------------------------------------------------------------------------- */
22
23 #include "header.h"
24
25 int grammar_version_number;            /* 1 for pre-Inform 6.06 table format */
26 int32 grammar_version_symbol;          /* Index of "Grammar__Version"
27                                           within symbols table               */
28
29 /* ------------------------------------------------------------------------- */
30 /*   Actions.                                                                */
31 /* ------------------------------------------------------------------------- */
32 /*   Array defined below:                                                    */
33 /*                                                                           */
34 /*    int32   action_byte_offset[n]       The (byte) offset in the Z-machine */
35 /*                                        code area of the ...Sub routine    */
36 /*                                        for action n.  (NB: This is left   */
37 /*                                        blank until the end of the         */
38 /*                                        compilation pass.)                 */
39 /*    int32   action_symbol[n]            The symbol table index of the n-th */
40 /*                                        action's name.                     */
41 /* ------------------------------------------------------------------------- */
42
43 int no_actions,                        /* Number of actions made so far      */
44     no_fake_actions;                   /* Number of fake actions made so far */
45
46 /* ------------------------------------------------------------------------- */
47 /*   Adjectives.  (The term "adjective" is traditional; they are mainly      */
48 /*                prepositions, such as "onto".)                             */
49 /* ------------------------------------------------------------------------- */
50 /*   Arrays defined below:                                                   */
51 /*                                                                           */
52 /*    int32 adjectives[n]                 Byte address of dictionary entry   */
53 /*                                        for the nth adjective              */
54 /*    dict_word adjective_sort_code[n]    Dictionary sort code of nth adj    */
55 /* ------------------------------------------------------------------------- */
56
57 int no_adjectives;                     /* Number of adjectives made so far   */
58
59 /* ------------------------------------------------------------------------- */
60 /*   Verbs.  Note that Inform-verbs are not quite the same as English verbs: */
61 /*           for example the English verbs "take" and "drop" both normally   */
62 /*           correspond in a game's dictionary to the same Inform verb.  An  */
63 /*           Inform verb is essentially a list of grammar lines.             */
64 /*           (Calling them "English verbs" is of course out of date. Read    */
65 /*           this as jargon for "dict words which are verbs".)             */
66 /* ------------------------------------------------------------------------- */
67 /*   Arrays defined below:                                                   */
68 /*                                                                           */
69 /*    verbt Inform_verbs[n]               The n-th grammar line sequence:    */
70 /*                                        see "header.h" for the definition  */
71 /*                                        of the typedef struct verbt        */
72 /*    int32 grammar_token_routine[n]      The byte offset from start of code */
73 /*                                        area of the n-th one               */
74 /* ------------------------------------------------------------------------- */
75
76 int no_Inform_verbs,                   /* Number of Inform-verbs made so far */
77     no_grammar_token_routines;         /* Number of routines given in tokens */
78
79 /* ------------------------------------------------------------------------- */
80 /*   We keep a list of English verb-words known (e.g. "take" or "eat") and   */
81 /*   which Inform-verbs they correspond to.  (This list is needed for some   */
82 /*   of the grammar extension operations.)                                   */
83 /*   The format of this list is a sequence of variable-length records:       */
84 /*                                                                           */
85 /*     Byte offset to start of next record  (1 byte)                         */
86 /*     Inform verb number this word corresponds to  (1 byte)                 */
87 /*     The English verb-word (reduced to lower case), null-terminated        */
88 /* ------------------------------------------------------------------------- */
89
90 static char *English_verb_list,        /* First byte of first record         */
91             *English_verb_list_top;    /* Next byte free for new record      */
92
93 static int English_verb_list_size;     /* Size of the list in bytes
94                                           (redundant but convenient)         */
95
96 /* Maximum synonyms in a single Verb/Extend directive */
97 #define MAX_VERB_SYNONYMS (32)
98
99 /* ------------------------------------------------------------------------- */
100 /*   Arrays used by this file                                                */
101 /* ------------------------------------------------------------------------- */
102
103   verbt   *Inform_verbs;
104   uchar   *grammar_lines;
105   int32    grammar_lines_top;
106   int      no_grammar_lines, no_grammar_tokens;
107
108   int32   *action_byte_offset,
109           *action_symbol,
110           *grammar_token_routine,
111           *adjectives;
112   static uchar *adjective_sort_code;
113
114 /* ------------------------------------------------------------------------- */
115 /*   Tracing for compiler maintenance                                        */
116 /* ------------------------------------------------------------------------- */
117
118 extern void list_verb_table(void)
119 {   int i;
120     for (i=0; i<no_Inform_verbs; i++)
121         printf("Verb %2d has %d lines\n", i, Inform_verbs[i].lines);
122 }
123
124 /* ------------------------------------------------------------------------- */
125 /*   Actions.                                                                */
126 /* ------------------------------------------------------------------------- */
127
128 static void new_action(char *b, int c)
129 {
130     /*  Called whenever a new action (or fake action) is created (either
131         by using make_action above, or the Fake_Action directive, or by
132         the linker).  At present just a hook for some tracing code.          */
133
134     if (printprops_switch)
135         printf("Action '%s' is numbered %d\n",b,c);
136 }
137
138 /* Note that fake actions are numbered from a high base point upwards;
139    real actions are numbered from 0 upward in GV2.                           */
140
/*  Parse the argument of a Fake_Action directive: a single new symbol
    name.  Defines the constant "<name>__A" holding a freshly allocated
    fake-action number (numbered upward from 256 in GV1, 4096 in GV2).   */
extern void make_fake_action(void)
{   int i;
    char action_sub[MAX_IDENTIFIER_LENGTH+4];
    debug_location_beginning beginning_debug_location =
        get_token_location_beginning();

    get_next_token();
    if (token_type != SYMBOL_TT)
    {   discard_token_location(beginning_debug_location);
        ebf_error("new fake action name", token_text);
        panic_mode_error_recovery(); return;
    }
    /* Action symbols (including fake_actions) may collide with other kinds of symbols. So we don't check that. */

    snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", token_text);
    i = symbol_index(action_sub, -1);

    if (!(sflags[i] & UNKNOWN_SFLAG))
    {   discard_token_location(beginning_debug_location);
        /* The user didn't know they were defining FOO__A, but they were and it's a problem. */
        ebf_symbol_error("new fake action name", action_sub, typename(stypes[i]), slines[i]);
        panic_mode_error_recovery(); return;
    }

    assign_symbol(i, ((grammar_version_number==1)?256:4096)+no_fake_actions++,
        FAKE_ACTION_T);

    /* NOTE(review): this passes the symbol index i, not the fake-action
       value just assigned; new_action only prints a trace line, but the
       number traced is the symbol index -- confirm this is intended.    */
    new_action(token_text, i);

    if (debugfile_switch)
    {   debug_file_printf("<fake-action>");
        debug_file_printf("<identifier>##%s</identifier>", token_text);
        debug_file_printf("<value>%d</value>", svals[i]);
        /* Consume the trailing token briefly so the debug location spans
           the whole directive, then push it back for the caller. */
        get_next_token();
        write_debug_locations
            (get_token_location_end(beginning_debug_location));
        put_token_back();
        debug_file_printf("</fake-action>");
    }

    return;
}
183
extern assembly_operand action_of_name(char *name)
{
    /*  Returns the action number of the given name, creating it as a new
        action name if it isn't already known as such.                       */

    char action_sub[MAX_IDENTIFIER_LENGTH+4];
    int j;
    assembly_operand AO;

    /* Action constants live in the symbol table under "<name>__A". */
    snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", name);
    j = symbol_index(action_sub, -1);

    if (stypes[j] == FAKE_ACTION_T)
    {   INITAO(&AO);
        AO.value = svals[j];
        if (!glulx_mode)
          AO.type = LONG_CONSTANT_OT;
        else
          set_constant_ot(&AO);
        sflags[j] |= USED_SFLAG;
        return AO;
    }

    if (sflags[j] & UNKNOWN_SFLAG)
    {
        /* First sighting: allocate the next real action number and record
           the symbol so find_the_actions() can locate its ...Sub routine. */
        if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS);
        new_action(name, no_actions);
        action_symbol[no_actions] = j;
        assign_symbol(j, no_actions++, CONSTANT_T);
        sflags[j] |= ACTION_SFLAG;
    }
    sflags[j] |= USED_SFLAG;

    INITAO(&AO);
    AO.value = svals[j];
    AO.marker = ACTION_MV;  /* marked so the value can be resolved later */
    if (!glulx_mode) {
      AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT;
      if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT;
    }
    else {
      AO.type = CONSTANT_OT;
    }
    return AO;
}
229
/*  End-of-pass fixup: for each action FOO defined (symbol "FOO__A"),
    look up the routine "FooSub" and record its code-area offset in
    action_byte_offset.  In module mode the offsets are simply zeroed
    (presumably resolved at link time -- TODO confirm).                  */
extern void find_the_actions(void)
{   int i; int32 j;
    char action_name[MAX_IDENTIFIER_LENGTH+4];
    char action_sub[MAX_IDENTIFIER_LENGTH+4];

    if (module_switch)
        for (i=0; i<no_actions; i++) action_byte_offset[i] = 0;
    else
    for (i=0; i<no_actions; i++)
    {   strcpy(action_name, (char *) symbs[action_symbol[i]]);
        action_name[strlen(action_name) - 3] = '\0'; /* remove "__A" */
        strcpy(action_sub, action_name);
        strcat(action_sub, "Sub");
        j = symbol_index(action_sub, -1);
        if (sflags[j] & UNKNOWN_SFLAG)
        {
            error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
        }
        else
        if (stypes[j] != ROUTINE_T)
        {
            /* A symbol with the right name exists but is not a routine:
               report both halves of the problem. */
            error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
            error_named_at("-- ...Sub symbol found, but not a routine:", action_sub, slines[j]);
        }
        else
        {   action_byte_offset[i] = svals[j];
            sflags[j] |= USED_SFLAG;
        }
    }
}
260
261 /* ------------------------------------------------------------------------- */
262 /*   Adjectives.                                                             */
263 /* ------------------------------------------------------------------------- */
264
static int make_adjective(char *English_word)
{
    /*  Returns adjective number of the English word supplied, creating
        a new adjective number if need be.

        Note that (partly for historical reasons) adjectives are numbered
        from 0xff downwards.  (And partly to make them stand out as tokens.)

        This routine is used only in grammar version 1: the corresponding
        table is left empty in GV2.                                          */

    int i; 
    uchar new_sort_code[MAX_DICT_WORD_BYTES];

    if (no_adjectives >= MAX_ADJECTIVES)
        memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES);

    /* Re-use an existing number if this word's dictionary sort code
       matches one already recorded. */
    dictionary_prepare(English_word, new_sort_code);
    for (i=0; i<no_adjectives; i++)
        if (compare_sorts(new_sort_code,
          adjective_sort_code+i*DICT_WORD_BYTES) == 0)
            return(0xff-i);
    /* New adjective: enter it in the dictionary and remember its sort
       code for future comparisons. */
    adjectives[no_adjectives]
        = dictionary_add(English_word,8,0,0xff-no_adjectives);
    copy_sorts(adjective_sort_code+no_adjectives*DICT_WORD_BYTES,
        new_sort_code);
    return(0xff-no_adjectives++);
}
293
294 /* ------------------------------------------------------------------------- */
295 /*   Parsing routines.                                                       */
296 /* ------------------------------------------------------------------------- */
297
static int make_parsing_routine(int32 routine_address)
{
    /*  Returns the index of the given routine's address within the
        grammar_token_routine table, appending it as a new entry if it
        has not been seen before.

        This routine is used only in grammar version 1: the corresponding
        table is left empty in GV2.                                          */

    int l;
    for (l=0; l<no_grammar_token_routines; l++)
        if (grammar_token_routine[l] == routine_address)
            return l;

    /* NOTE(review): no explicit bounds check on the table here --
       presumably capacity is guaranteed by the callers' limits on
       grammar tokens; verify against the table's allocation. */
    grammar_token_routine[l] = routine_address;
    return(no_grammar_token_routines++);
}
311
312 /* ------------------------------------------------------------------------- */
313 /*   The English-verb list.                                                  */
314 /* ------------------------------------------------------------------------- */
315
316 static int find_or_renumber_verb(char *English_verb, int *new_number)
317 {
318     /*  If new_number is null, returns the Inform-verb number which the
319      *  given English verb causes, or -1 if the given verb is not in the
320      *  dictionary                     */
321
322     /*  If new_number is non-null, renumbers the Inform-verb number which
323      *  English_verb matches in English_verb_list to account for the case
324      *  when we are extending a verb.  Returns 0 if successful, or -1 if
325      *  the given verb is not in the dictionary (which shouldn't happen as
326      *  get_verb has already run) */
327
328     char *p;
329     p=English_verb_list;
330     while (p < English_verb_list_top)
331     {   if (strcmp(English_verb, p+3) == 0)
332         {   if (new_number)
333             {   p[1] = (*new_number)/256;
334                 p[2] = (*new_number)%256;
335                 return 0;
336             }
337             return(256*((uchar)p[1]))+((uchar)p[2]);
338         }
339         p=p+(uchar)p[0];
340     }
341     return(-1);
342 }
343
344 static void register_verb(char *English_verb, int number)
345 {
346     /*  Registers a new English verb as referring to the given Inform-verb
347         number.  (See comments above for format of the list.)                */
348     int entrysize;
349
350     if (find_or_renumber_verb(English_verb, NULL) != -1)
351     {   error_named("Two different verb definitions refer to", English_verb);
352         return;
353     }
354
355     /* We set a hard limit of MAX_VERB_WORD_SIZE=120 because the
356        English_verb_list table stores length in a leading byte. (We could
357        raise that to 250, really, but there's little point when
358        MAX_DICT_WORD_SIZE is 40.) */
359     entrysize = strlen(English_verb)+4;
360     if (entrysize > MAX_VERB_WORD_SIZE+4)
361         error_numbered("Verb word is too long -- max length is", MAX_VERB_WORD_SIZE);
362     English_verb_list_size += entrysize;
363     if (English_verb_list_size >= MAX_VERBSPACE)
364         memoryerror("MAX_VERBSPACE", MAX_VERBSPACE);
365
366     English_verb_list_top[0] = entrysize;
367     English_verb_list_top[1] = number/256;
368     English_verb_list_top[2] = number%256;
369     strcpy(English_verb_list_top+3, English_verb);
370     English_verb_list_top += entrysize;
371 }
372
373 static int get_verb(void)
374 {
375     /*  Look at the last-read token: if it's the name of an English verb
376         understood by Inform, in double-quotes, then return the Inform-verb
377         that word refers to: otherwise give an error and return -1.          */
378
379     int j;
380
381     if ((token_type == DQ_TT) || (token_type == SQ_TT))
382     {   j = find_or_renumber_verb(token_text, NULL);
383         if (j==-1)
384             error_named("There is no previous grammar for the verb",
385                 token_text);
386         return j;
387     }
388
389     ebf_error("an English verb in quotes", token_text);
390
391     return -1;
392 }
393
394 /* ------------------------------------------------------------------------- */
395 /*   Grammar lines for Verb/Extend directives.                               */
396 /* ------------------------------------------------------------------------- */
397
static int grammar_line(int verbnum, int line)
{
    /*  Parse a grammar line, to be written into grammar_lines[mark] onward.

        Syntax: * <token1> ... <token-n> -> <action>

        is compiled to a table in the form:

                <action number : word>
                <token 1> ... <token n> <ENDIT>

        where <ENDIT> is the byte 15, and each <token> is 3 bytes long.

        If grammar_version_number is 1, the token holds

                <bytecode> 00 00

        and otherwise a GV2 token.

        Return TRUE if grammar continues after the line, FALSE if the
        directive comes to an end.                                           */

    int j, bytecode, mark; int32 wordcode;
    int grammar_token, slash_mode, last_was_slash;
    int reverse_action, TOKEN_SIZE;
    debug_location_beginning beginning_debug_location =
        get_token_location_beginning();

    /* A line must open with '*'; a ';' instead ends the directive. */
    get_next_token();
    if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
    {   discard_token_location(beginning_debug_location);
        return FALSE;
    }
    if (!((token_type == SEP_TT) && (token_value == TIMES_SEP)))
    {   discard_token_location(beginning_debug_location);
        ebf_error("'*' divider", token_text);
        panic_mode_error_recovery();
        return FALSE;
    }

    /*  Have we run out of lines or token space?  */

    if (line >= MAX_LINES_PER_VERB)
    {   discard_token_location(beginning_debug_location);
        error("Too many lines of grammar for verb. This maximum is built \
into Inform, so suggest rewriting grammar using general parsing routines");
        return(FALSE);
    }

    /*  Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long  */
    /*  In Glulx, that's 5*32 + 4 = 164 bytes */

    mark = grammar_lines_top;
    if (!glulx_mode) {
        if (mark + 100 >= MAX_LINESPACE)
        {   discard_token_location(beginning_debug_location);
            memoryerror("MAX_LINESPACE", MAX_LINESPACE);
        }
    }
    else {
        if (mark + 165 >= MAX_LINESPACE)
        {   discard_token_location(beginning_debug_location);
            memoryerror("MAX_LINESPACE", MAX_LINESPACE);
        }
    }

    Inform_verbs[verbnum].l[line] = mark;

    /* Reserve room at the head of the line for the action word (plus, in
       Glulx, the reverse flag); it is filled in at the end, once the
       '-> <action>' clause has been read. */
    if (!glulx_mode) {
        mark = mark + 2;
        TOKEN_SIZE = 3;
    }
    else {
        mark = mark + 3;
        TOKEN_SIZE = 5;
    }

    grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE;
    no_grammar_lines++;

    /* Read grammar tokens one at a time until the '->' divider. */
    do
    {   get_next_token();
        bytecode = 0; wordcode = 0;
        if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
        {   discard_token_location(beginning_debug_location);
            ebf_error("'->' clause", token_text);
            return FALSE;
        }
        if ((token_type == SEP_TT) && (token_value == ARROW_SEP))
        {   if (last_was_slash && (grammar_token>0))
                ebf_error("grammar token", token_text);
            break;
        }

        if (!last_was_slash) slash_mode = FALSE;
        if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP))
        {   if (grammar_version_number == 1)
                error("'/' can only be used with Library 6/3 or later");
            if (last_was_slash)
                ebf_error("grammar token or '->'", token_text);
            else
            {   last_was_slash = TRUE;
                slash_mode = TRUE;
                /* '/' joins alternative prepositions: mark the previous
                   token (low nibble 2 = preposition) as "followed by /". */
                if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2)
                    error("'/' can only be applied to prepositions");
                grammar_lines[mark-TOKEN_SIZE] |= 0x20;
                continue;
            }
        }
        else last_was_slash = FALSE;

        /* Translate the token into (bytecode, wordcode) for the table. */
        if ((token_type == DQ_TT) || (token_type == SQ_TT))
        {    if (grammar_version_number == 1)
                 bytecode = make_adjective(token_text);
             else
             {   bytecode = 0x42;
                 wordcode = dictionary_add(token_text, 8, 0, 0);
             }
        }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK))
             {   get_next_token();
                 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
                 {
                     /*  noun = <routine>                                    */

                     get_next_token();
                     if ((token_type != SYMBOL_TT)
                         || (stypes[token_value] != ROUTINE_T))
                     {   discard_token_location(beginning_debug_location);
                         ebf_error("routine name after 'noun='", token_text);
                         panic_mode_error_recovery();
                         return FALSE;
                     }
                     if (grammar_version_number == 1)
                         bytecode
                             = 16 + make_parsing_routine(svals[token_value]);
                     else
                     {   bytecode = 0x83;
                         wordcode = svals[token_value];
                     }
                     sflags[token_value] |= USED_SFLAG;
                 }
                 else
                 {   put_token_back();
                     if (grammar_version_number == 1) bytecode=0;
                     else { bytecode = 1; wordcode = 0; }
                 }
             }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK))
             {   if (grammar_version_number==1) bytecode=1;
                 else { bytecode=1; wordcode=1; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK))
             {   if (grammar_version_number==1) bytecode=2;
                 else { bytecode=1; wordcode=2; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK))
             {   if (grammar_version_number==1) bytecode=3;
                 else { bytecode=1; wordcode=3; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK))
             {   if (grammar_version_number==1) bytecode=4;
                 else { bytecode=1; wordcode=4; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK))
             {   if (grammar_version_number==1) bytecode=5;
                 else { bytecode=1; wordcode=5; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK))
             {   if (grammar_version_number==1) bytecode=6;
                 else { bytecode=1; wordcode=6; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK))
             {   if (grammar_version_number==1) bytecode=7;
                 else { bytecode=1; wordcode=7; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK))
             {   if (grammar_version_number==1) bytecode=8;
                 else { bytecode=1; wordcode=8; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK))
             {   if (grammar_version_number==1)
                     error("The 'topic' token is only available if you \
are using Library 6/3 or later");
                 else { bytecode=1; wordcode=9; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK))
             {
                 /*  scope = <routine> */

                 get_next_token();
                 if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP)))
                 {   discard_token_location(beginning_debug_location);
                     ebf_error("'=' after 'scope'", token_text);
                     panic_mode_error_recovery();
                     return FALSE;
                 }

                 get_next_token();
                 if ((token_type != SYMBOL_TT)
                     || (stypes[token_value] != ROUTINE_T))
                 {   discard_token_location(beginning_debug_location);
                     ebf_error("routine name after 'scope='", token_text);
                     panic_mode_error_recovery();
                     return FALSE;
                 }

                 if (grammar_version_number == 1)
                     bytecode = 80 +
                         make_parsing_routine(svals[token_value]);
                 else { bytecode = 0x85; wordcode = svals[token_value]; }
                 sflags[token_value] |= USED_SFLAG;
             }
        else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
             {   discard_token_location(beginning_debug_location);
                 error("'=' is only legal here as 'noun=Routine'");
                 panic_mode_error_recovery();
                 return FALSE;
             }
        else {   /*  <attribute>  or  <general-parsing-routine>  tokens      */

                 if ((token_type != SYMBOL_TT)
                     || ((stypes[token_value] != ATTRIBUTE_T)
                         && (stypes[token_value] != ROUTINE_T)))
                 {   discard_token_location(beginning_debug_location);
                     error_named("No such grammar token as", token_text);
                     panic_mode_error_recovery();
                     return FALSE;
                 }
                 if (stypes[token_value]==ATTRIBUTE_T)
                 {   if (grammar_version_number == 1)
                         bytecode = 128 + svals[token_value];
                     else { bytecode = 4; wordcode = svals[token_value]; }
                 }
                 else
                 {   if (grammar_version_number == 1)
                         bytecode = 48 +
                             make_parsing_routine(svals[token_value]);
                     else { bytecode = 0x86; wordcode = svals[token_value]; }
                 }
                 sflags[token_value] |= USED_SFLAG;
             }

        /* Emit the token -- except that GV1 lines are silently truncated
           after six tokens (warned about on the seventh only). */
        grammar_token++; no_grammar_tokens++;
        if ((grammar_version_number == 1) && (grammar_token > 6))
        {   if (grammar_token == 7)
                warning("Grammar line cut short: you can only have up to 6 \
tokens in any line (unless you're compiling with library 6/3 or later)");
        }
        else
        {   if (slash_mode)
            {   if (bytecode != 0x42)
                    error("'/' can only be applied to prepositions");
                bytecode |= 0x10;
            }
            grammar_lines[mark++] = bytecode;
            if (!glulx_mode) {
                grammar_lines[mark++] = wordcode/256;
                grammar_lines[mark++] = wordcode%256;
            }
            else {
                grammar_lines[mark++] = ((wordcode >> 24) & 0xFF);
                grammar_lines[mark++] = ((wordcode >> 16) & 0xFF);
                grammar_lines[mark++] = ((wordcode >> 8) & 0xFF);
                grammar_lines[mark++] = ((wordcode) & 0xFF);
            }
        }

    } while (TRUE);

    /* Terminate the token list with the ENDIT byte. */
    grammar_lines[mark++] = 15;
    grammar_lines_top = mark;

    /* The action name after '->' must not be entered into the symbol
       table as a side effect of reading it. */
    dont_enter_into_symbol_table = TRUE;
    get_next_token();
    dont_enter_into_symbol_table = FALSE;

    if (token_type != DQ_TT)
    {   discard_token_location(beginning_debug_location);
        ebf_error("name of new or existing action", token_text);
        panic_mode_error_recovery();
        return FALSE;
    }

    {   assembly_operand AO = action_of_name(token_text);
        j = AO.value;
        if (j >= ((grammar_version_number==1)?256:4096))
            error_named("This is a fake action, not a real one:", token_text);
    }

    reverse_action = FALSE;
    get_next_token();
    if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK))
    {   if (grammar_version_number == 1)
            error("'reverse' actions can only be used with \
Library 6/3 or later");
        reverse_action = TRUE;
    }
    else put_token_back();

    /* Return to the head of the line and fill in the action word that
       was reserved before the token loop. */
    mark = Inform_verbs[verbnum].l[line];

    if (debugfile_switch)
    {   debug_file_printf("<table-entry>");
        debug_file_printf("<type>grammar line</type>");
        debug_file_printf("<address>");
        write_debug_grammar_backpatch(mark);
        debug_file_printf("</address>");
        debug_file_printf("<end-address>");
        write_debug_grammar_backpatch(grammar_lines_top);
        debug_file_printf("</end-address>");
        write_debug_locations
            (get_token_location_end(beginning_debug_location));
        debug_file_printf("</table-entry>");
    }

    if (!glulx_mode) {
        /* In the Z-machine, bit 0x400 of the action word flags 'reverse'. */
        if (reverse_action)
            j = j + 0x400;
        grammar_lines[mark++] = j/256;
        grammar_lines[mark++] = j%256;
    }
    else {
        grammar_lines[mark++] = ((j >> 8) & 0xFF);
        grammar_lines[mark++] = ((j) & 0xFF);
        grammar_lines[mark++] = (reverse_action ? 1 : 0);
    }

    return TRUE;
}
719
720 /* ------------------------------------------------------------------------- */
721 /*   The Verb directive:                                                     */
722 /*                                                                           */
723 /*       Verb [meta] "word-1" ... "word-n" | = "existing-English-verb"       */
724 /*                                         | <grammar-line-1> ... <g-line-n> */
725 /*                                                                           */
726 /* ------------------------------------------------------------------------- */
727
728 extern void make_verb(void)
729 {
730     /*  Parse an entire Verb ... directive.                                  */
731
732     int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE;
733
734     char *English_verbs_given[MAX_VERB_SYNONYMS];
735     int no_given = 0, i;
736
737     directive_keywords.enabled = TRUE;
738
739     get_next_token();
740
741     if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK))
742     {   meta_verb_flag = TRUE;
743         get_next_token();
744     }
745
746     while ((token_type == DQ_TT) || (token_type == SQ_TT))
747     {
748         if (no_given >= MAX_VERB_SYNONYMS) {
749             error("Too many synonyms in a Verb directive.");
750             panic_mode_error_recovery(); return;
751         }
752         English_verbs_given[no_given++] = token_text;
753         get_next_token();
754     }
755
756     if (no_given == 0)
757     {   ebf_error("English verb in quotes", token_text);
758         panic_mode_error_recovery(); return;
759     }
760
761     if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
762     {   verb_equals_form = TRUE;
763         get_next_token();
764         Inform_verb = get_verb();
765         if (Inform_verb == -1) return;
766         get_next_token();
767         if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)))
768             ebf_error("';' after English verb", token_text);
769     }
770     else
771     {   Inform_verb = no_Inform_verbs;
772         if (no_Inform_verbs == MAX_VERBS)
773             memoryerror("MAX_VERBS",MAX_VERBS);
774     }
775
776     for (i=0; i<no_given; i++)
777     {   dictionary_add(English_verbs_given[i],
778             0x41 + ((meta_verb_flag)?0x02:0x00),
779             (glulx_mode)?(0xffff-Inform_verb):(0xff-Inform_verb), 0);
780         register_verb(English_verbs_given[i], Inform_verb);
781     }
782
783     if (!verb_equals_form)
784     {   int lines = 0;
785         put_token_back();
786         while (grammar_line(no_Inform_verbs, lines++)) ;
787         Inform_verbs[no_Inform_verbs++].lines = --lines;
788     }
789
790     directive_keywords.enabled = FALSE;
791 }
792
793 /* ------------------------------------------------------------------------- */
794 /*   The Extend directive:                                                   */
795 /*                                                                           */
796 /*      Extend | only "verb-1" ... "verb-n"  |             <grammar-lines>   */
797 /*             | "verb"                      | "replace"                     */
798 /*                                           | "first"                       */
799 /*                                           | "last"                        */
800 /*                                                                           */
801 /* ------------------------------------------------------------------------- */
802
#define EXTEND_REPLACE 1   /* overwrite the verb's existing grammar lines */
#define EXTEND_FIRST   2   /* insert the new lines before the existing ones */
#define EXTEND_LAST    3   /* append the new lines after the existing ones */
806
807 extern void extend_verb(void)
808 {
809     /*  Parse an entire Extend ... directive.                                */
810
811     int Inform_verb = -1, k, l, lines, extend_mode;
812
813     directive_keywords.enabled = TRUE;
814     directives.enabled = FALSE;
815
816     get_next_token();
817     if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK))
818     {   l = -1;
819         if (no_Inform_verbs == MAX_VERBS)
820             memoryerror("MAX_VERBS", MAX_VERBS);
821         while (get_next_token(),
822                ((token_type == DQ_TT) || (token_type == SQ_TT)))
823         {   Inform_verb = get_verb();
824             if (Inform_verb == -1) return;
825             if ((l!=-1) && (Inform_verb!=l))
826               warning_named("Verb disagrees with previous verbs:", token_text);
827             l = Inform_verb;
828             dictionary_set_verb_number(token_text,
829               (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs));
830             /* make call to renumber verb in English_verb_list too */
831             if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1)
832               warning_named("Verb to extend not found in English_verb_list:",
833                  token_text);
834         }
835
836         /*  Copy the old Inform-verb into a new one which the list of
837             English-verbs given have had their dictionary entries modified
838             to point to                                                      */
839
840         Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb];
841         Inform_verb = no_Inform_verbs++;
842     }
843     else
844     {   Inform_verb = get_verb();
845         if (Inform_verb == -1) return;
846         get_next_token();
847     }
848
849     /*  Inform_verb now contains the number of the Inform-verb to extend...  */
850
851     extend_mode = EXTEND_LAST;
852     if ((token_type == SEP_TT) && (token_value == TIMES_SEP))
853         put_token_back();
854     else
855     {   extend_mode = 0;
856         if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK))
857             extend_mode = EXTEND_REPLACE;
858         if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK))
859             extend_mode = EXTEND_FIRST;
860         if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK))
861             extend_mode = EXTEND_LAST;
862
863         if (extend_mode==0)
864         {   ebf_error("'replace', 'last', 'first' or '*'", token_text);
865             extend_mode = EXTEND_LAST;
866         }
867     }
868
869     l = Inform_verbs[Inform_verb].lines;
870     lines = 0;
871     if (extend_mode == EXTEND_LAST) lines=l;
872     do
873     {   if (extend_mode == EXTEND_FIRST)
874             for (k=l; k>0; k--)
875                  Inform_verbs[Inform_verb].l[k+lines]
876                      = Inform_verbs[Inform_verb].l[k-1+lines];
877     } while (grammar_line(Inform_verb, lines++));
878
879     if (extend_mode == EXTEND_FIRST)
880     {   Inform_verbs[Inform_verb].lines = l+lines-1;
881         for (k=0; k<l; k++)
882             Inform_verbs[Inform_verb].l[k+lines-1]
883                 = Inform_verbs[Inform_verb].l[k+lines];
884     }
885     else Inform_verbs[Inform_verb].lines = --lines;
886
887     directive_keywords.enabled = FALSE;
888     directives.enabled = TRUE;
889 }
890
891 /* ========================================================================= */
892 /*   Data structure management routines                                      */
893 /* ------------------------------------------------------------------------- */
894
895 extern void init_verbs_vars(void)
896 {
897     no_fake_actions = 0;
898     no_actions = 0;
899     no_grammar_lines = 0;
900     no_grammar_tokens = 0;
901     English_verb_list_size = 0;
902
903     Inform_verbs = NULL;
904     action_byte_offset = NULL;
905     grammar_token_routine = NULL;
906     adjectives = NULL;
907     adjective_sort_code = NULL;
908     English_verb_list = NULL;
909
910     if (!glulx_mode)
911         grammar_version_number = 1;
912     else
913         grammar_version_number = 2;
914 }
915
916 extern void verbs_begin_pass(void)
917 {
918     no_Inform_verbs=0; no_adjectives=0;
919     no_grammar_token_routines=0;
920     no_actions=0;
921
922     no_fake_actions=0;
923     grammar_lines_top = 0;
924 }
925
926 extern void verbs_allocate_arrays(void)
927 {
928     Inform_verbs          = my_calloc(sizeof(verbt),   MAX_VERBS, "verbs");
929     grammar_lines         = my_malloc(MAX_LINESPACE, "grammar lines");
930     action_byte_offset    = my_calloc(sizeof(int32),   MAX_ACTIONS, "actions");
931     action_symbol         = my_calloc(sizeof(int32),   MAX_ACTIONS,
932                                 "action symbols");
933     grammar_token_routine = my_calloc(sizeof(int32),   MAX_ACTIONS,
934                                 "grammar token routines");
935     adjectives            = my_calloc(sizeof(int32),   MAX_ADJECTIVES,
936                                 "adjectives");
937     adjective_sort_code   = my_calloc(DICT_WORD_BYTES, MAX_ADJECTIVES,
938                                 "adjective sort codes");
939
940     English_verb_list     = my_malloc(MAX_VERBSPACE, "register of verbs");
941     English_verb_list_top = English_verb_list;
942 }
943
944 extern void verbs_free_arrays(void)
945 {
946     my_free(&Inform_verbs, "verbs");
947     my_free(&grammar_lines, "grammar lines");
948     my_free(&action_byte_offset, "actions");
949     my_free(&action_symbol, "action symbols");
950     my_free(&grammar_token_routine, "grammar token routines");
951     my_free(&adjectives, "adjectives");
952     my_free(&adjective_sort_code, "adjective sort codes");
953     my_free(&English_verb_list, "register of verbs");
954 }
955
956 /* ========================================================================= */