/* ------------------------------------------------------------------------- */
/*   "verbs" :  Manages actions and grammar tables; parses the directives    */
/*              Verb and Extend.                                             */
/*                                                                           */
/*  Copyright (c) Graham Nelson 1993 - 2016                                  */
/*                                                                           */
/* This file is part of Inform.                                              */
/*                                                                           */
/* Inform is free software: you can redistribute it and/or modify            */
/* it under the terms of the GNU General Public License as published by      */
/* the Free Software Foundation, either version 3 of the License, or         */
/* (at your option) any later version.                                       */
/*                                                                           */
/* Inform is distributed in the hope that it will be useful,                 */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of            */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              */
/* GNU General Public License for more details.                              */
/*                                                                           */
/* You should have received a copy of the GNU General Public License         */
/* along with Inform. If not, see https://gnu.org/licenses/                  */
/*                                                                           */
/* ------------------------------------------------------------------------- */

#include "header.h"

int grammar_version_number;            /* 1 for pre-Inform 6.06 table format */
int32 grammar_version_symbol;          /* Index of "Grammar__Version"
                                          within symbols table               */

/* ------------------------------------------------------------------------- */
/*   Actions.                                                                */
/* ------------------------------------------------------------------------- */
/*   Array defined below:                                                    */
/*                                                                           */
/*    int32   action_byte_offset[n]       The (byte) offset in the Z-machine */
/*                                        code area of the ...Sub routine    */
/*                                        for action n.  (NB: This is left   */
/*                                        blank until the end of the         */
/*                                        compilation pass.)                 */
/*    int32   action_symbol[n]            The symbol table index of the n-th */
/*                                        action's name.                     */
/* ------------------------------------------------------------------------- */

int no_actions,                        /* Number of actions made so far      */
    no_fake_actions;                   /* Number of fake actions made so far */

/* ------------------------------------------------------------------------- */
/*   Adjectives.  (The term "adjective" is traditional; they are mainly      */
/*                prepositions, such as "onto".)                             */
/* ------------------------------------------------------------------------- */
/*   Arrays defined below:                                                   */
/*                                                                           */
/*    int32 adjectives[n]                 Byte address of dictionary entry   */
/*                                        for the nth adjective              */
/*    dict_word adjective_sort_code[n]    Dictionary sort code of nth adj    */
/* ------------------------------------------------------------------------- */

int no_adjectives;                     /* Number of adjectives made so far   */

/* ------------------------------------------------------------------------- */
/*   Verbs.  Note that Inform-verbs are not quite the same as English verbs: */
/*           for example the English verbs "take" and "drop" both normally   */
/*           correspond in a game's dictionary to the same Inform verb.  An  */
/*           Inform verb is essentially a list of grammar lines.             */
/* ------------------------------------------------------------------------- */
/*   Arrays defined below:                                                   */
/*                                                                           */
/*    verbt Inform_verbs[n]               The n-th grammar line sequence:    */
/*                                        see "header.h" for the definition  */
/*                                        of the typedef struct verbt        */
/*    int32 grammar_token_routine[n]      The byte offset from start of code */
/*                                        area of the n-th one               */
/* ------------------------------------------------------------------------- */

int no_Inform_verbs,                   /* Number of Inform-verbs made so far */
    no_grammar_token_routines;         /* Number of routines given in tokens */

/* ------------------------------------------------------------------------- */
/*   We keep a list of English verb-words known (e.g. "take" or "eat") and   */
/*   which Inform-verbs they correspond to.  (This list is needed for some   */
/*   of the grammar extension operations.)                                   */
/*   The format of this list is a sequence of variable-length records:       */
/*                                                                           */
/*     Byte offset to start of next record  (1 byte)                         */
/*     Inform verb number this word corresponds to  (2 bytes)                */
/*     The English verb-word (reduced to lower case), null-terminated        */
/* ------------------------------------------------------------------------- */
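/*   For example (illustrative only): the verb-word "take" registered as     */
/*   Inform-verb number 3 would occupy the eight bytes                       */
/*                                                                           */
/*       08 00 03 't' 'a' 'k' 'e' 00                                         */
/*                                                                           */
/*   since the offset byte is 4 + strlen("take") and the verb number is      */
/*   stored high byte first (see register_verb below).                       */
/* ------------------------------------------------------------------------- */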

static char *English_verb_list,        /* First byte of first record         */
            *English_verb_list_top;    /* Next byte free for new record      */

static int English_verb_list_size;     /* Size of the list in bytes
                                          (redundant but convenient)         */

/* ------------------------------------------------------------------------- */
/*   Arrays used by this file                                                */
/* ------------------------------------------------------------------------- */

  verbt   *Inform_verbs;
  uchar   *grammar_lines;
  int32    grammar_lines_top;
  int      no_grammar_lines, no_grammar_tokens;

  int32   *action_byte_offset,
          *action_symbol,
          *grammar_token_routine,
          *adjectives;
  static uchar *adjective_sort_code;

/* ------------------------------------------------------------------------- */
/*   Tracing for compiler maintenance                                        */
/* ------------------------------------------------------------------------- */

extern void list_verb_table(void)
{   int i;
    for (i=0; i<no_Inform_verbs; i++)
        printf("Verb %2d has %d lines\n", i, Inform_verbs[i].lines);
}

/* ------------------------------------------------------------------------- */
/*   Actions.                                                                */
/* ------------------------------------------------------------------------- */

static void new_action(char *b, int c)
{
    /*  Called whenever a new action (or fake action) is created (either
        by using make_action above, or the Fake_Action directive, or by
        the linker).  At present just a hook for some tracing code.          */

    if (printprops_switch)
        printf("Action '%s' is numbered %d\n",b,c);
}

/* Note that fake actions are numbered from a high base point upwards;
   real actions are numbered from 0 upward in GV2.                           */
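/* (Illustratively: with GV2 grammar the first fake action declared receives
   the value 4096 and the next 4097, while with GV1 the base point is 256;
   real actions are simply numbered 0, 1, 2, ... in order of creation.)      */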

extern void make_fake_action(void)
{   int i;
    char action_sub[MAX_IDENTIFIER_LENGTH+4];
    debug_location_beginning beginning_debug_location =
        get_token_location_beginning();

    get_next_token();
    if (token_type != SYMBOL_TT)
    {   discard_token_location(beginning_debug_location);
        ebf_error("new fake action name", token_text);
        panic_mode_error_recovery(); return;
    }

    snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", token_text);
    i = symbol_index(action_sub, -1);

    if (!(sflags[i] & UNKNOWN_SFLAG))
    {   discard_token_location(beginning_debug_location);
        ebf_error("new fake action name", token_text);
        panic_mode_error_recovery(); return;
    }

    assign_symbol(i, ((grammar_version_number==1)?256:4096)+no_fake_actions++,
        FAKE_ACTION_T);

    new_action(token_text, i);

    if (debugfile_switch)
    {   debug_file_printf("<fake-action>");
        debug_file_printf("<identifier>##%s</identifier>", token_text);
        debug_file_printf("<value>%d</value>", svals[i]);
        get_next_token();
        write_debug_locations
            (get_token_location_end(beginning_debug_location));
        put_token_back();
        debug_file_printf("</fake-action>");
    }

    return;
}

extern assembly_operand action_of_name(char *name)
{
    /*  Returns the action number of the given name, creating it as a new
        action name if it isn't already known as such.                       */

    char action_sub[MAX_IDENTIFIER_LENGTH+4];
    int j;
    assembly_operand AO;

    snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", name);
    j = symbol_index(action_sub, -1);

    if (stypes[j] == FAKE_ACTION_T)
    {   INITAO(&AO);
        AO.value = svals[j];
        if (!glulx_mode)
          AO.type = LONG_CONSTANT_OT;
        else
          set_constant_ot(&AO);
        sflags[j] |= USED_SFLAG;
        return AO;
    }

    if (sflags[j] & UNKNOWN_SFLAG)
    {
        if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS);
        new_action(name, no_actions);
        action_symbol[no_actions] = j;
        assign_symbol(j, no_actions++, CONSTANT_T);
        sflags[j] |= ACTION_SFLAG;
    }
    sflags[j] |= USED_SFLAG;

    INITAO(&AO);
    AO.value = svals[j];
    AO.marker = ACTION_MV;
    if (!glulx_mode) {
      AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT;
      if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT;
    }
    else {
      AO.type = CONSTANT_OT;
    }
    return AO;
}

extern void find_the_actions(void)
{   int i; int32 j;
    char action_name[MAX_IDENTIFIER_LENGTH+4];
    char action_sub[MAX_IDENTIFIER_LENGTH+4];

    if (module_switch)
        for (i=0; i<no_actions; i++) action_byte_offset[i] = 0;
    else
    for (i=0; i<no_actions; i++)
    {   strcpy(action_name, (char *) symbs[action_symbol[i]]);
        action_name[strlen(action_name) - 3] = '\0'; /* remove "__A" */
        strcpy(action_sub, action_name);
        strcat(action_sub, "Sub");
        j = symbol_index(action_sub, -1);
        if (sflags[j] & UNKNOWN_SFLAG)
        {
            error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
        }
        else
        if (stypes[j] != ROUTINE_T)
        {
            error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
            error_named_at("-- ...Sub symbol found, but not a routine:", action_sub, slines[j]);
        }
        else
        {   action_byte_offset[i] = svals[j];
            sflags[j] |= USED_SFLAG;
        }
    }
}

/* ------------------------------------------------------------------------- */
/*   Adjectives.                                                             */
/* ------------------------------------------------------------------------- */

static int make_adjective(char *English_word)
{
    /*  Returns adjective number of the English word supplied, creating
        a new adjective number if need be.

        Note that (partly for historical reasons) adjectives are numbered
        from 0xff downwards.  (And partly to make them stand out as tokens.)

        This routine is used only in grammar version 1: the corresponding
        table is left empty in GV2.                                          */
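    /*  (Illustratively: the first adjective created is numbered 0xff and
        the second 0xfe; that value is what the GV1 grammar token bytecode
        records for a preposition.)                                          */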

    int i;
    uchar new_sort_code[MAX_DICT_WORD_BYTES];

    if (no_adjectives >= MAX_ADJECTIVES)
        memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES);

    dictionary_prepare(English_word, new_sort_code);
    for (i=0; i<no_adjectives; i++)
        if (compare_sorts(new_sort_code,
          adjective_sort_code+i*DICT_WORD_BYTES) == 0)
            return(0xff-i);
    adjectives[no_adjectives]
        = dictionary_add(English_word,8,0,0xff-no_adjectives);
    copy_sorts(adjective_sort_code+no_adjectives*DICT_WORD_BYTES,
        new_sort_code);
    return(0xff-no_adjectives++);
}

/* ------------------------------------------------------------------------- */
/*   Parsing routines.                                                       */
/* ------------------------------------------------------------------------- */

static int make_parsing_routine(int32 routine_address)
{
    /*  This routine is used only in grammar version 1: the corresponding
        table is left empty in GV2.                                          */

    int l;
    for (l=0; l<no_grammar_token_routines; l++)
        if (grammar_token_routine[l] == routine_address)
            return l;

    grammar_token_routine[l] = routine_address;
    return(no_grammar_token_routines++);
}

/* ------------------------------------------------------------------------- */
/*   The English-verb list.                                                  */
/* ------------------------------------------------------------------------- */

static int find_or_renumber_verb(char *English_verb, int *new_number)
{
    /*  If new_number is null, returns the Inform-verb number which the
     *  given English verb causes, or -1 if the given verb is not in the
     *  dictionary                     */

    /*  If new_number is non-null, renumbers the Inform-verb number which
     *  English_verb matches in English_verb_list to account for the case
     *  when we are extending a verb.  Returns 0 if successful, or -1 if
     *  the given verb is not in the dictionary (which shouldn't happen as
     *  get_verb has already run) */

    char *p;
    p=English_verb_list;
    while (p < English_verb_list_top)
    {   if (strcmp(English_verb, p+3) == 0)
        {   if (new_number)
            {   p[1] = (*new_number)/256;
                p[2] = (*new_number)%256;
                return 0;
            }
            return(256*((uchar)p[1]))+((uchar)p[2]);
        }
        p=p+(uchar)p[0];
    }
    return(-1);
}

static void register_verb(char *English_verb, int number)
{
    /*  Registers a new English verb as referring to the given Inform-verb
        number.  (See comments above for format of the list.)                */

    if (find_or_renumber_verb(English_verb, NULL) != -1)
    {   error_named("Two different verb definitions refer to", English_verb);
        return;
    }

    English_verb_list_size += strlen(English_verb)+4;
    if (English_verb_list_size >= MAX_VERBSPACE)
        memoryerror("MAX_VERBSPACE", MAX_VERBSPACE);

    English_verb_list_top[0] = 4+strlen(English_verb);
    English_verb_list_top[1] = number/256;
    English_verb_list_top[2] = number%256;
    strcpy(English_verb_list_top+3, English_verb);
    English_verb_list_top += English_verb_list_top[0];
}

static int get_verb(void)
{
    /*  Look at the last-read token: if it's the name of an English verb
        understood by Inform, in double-quotes, then return the Inform-verb
        that word refers to: otherwise give an error and return -1.          */

    int j;

    if ((token_type == DQ_TT) || (token_type == SQ_TT))
    {   j = find_or_renumber_verb(token_text, NULL);
        if (j==-1)
            error_named("There is no previous grammar for the verb",
                token_text);
        return j;
    }

    ebf_error("an English verb in quotes", token_text);

    return -1;
}

/* ------------------------------------------------------------------------- */
/*   Grammar lines for Verb/Extend directives.                               */
/* ------------------------------------------------------------------------- */

static int grammar_line(int verbnum, int line)
{
    /*  Parse a grammar line, to be written into grammar_lines[mark] onward.

        Syntax: * <token1> ... <token-n> -> <action>

        is compiled to a table in the form:

                <action number : word>
                <token 1> ... <token n> <ENDIT>

        where <ENDIT> is the byte 15, and each <token> is 3 bytes long.

        If grammar_version_number is 1, the token holds

                <bytecode> 00 00

        and otherwise a GV2 token.

        Return TRUE if grammar continues after the line, FALSE if the
        directive comes to an end.                                           */
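    /*  For example (illustrative; GV2 on the Z-machine, and assuming the
        action Take has been given action number 10):

            * noun -> Take

        is laid down as the bytes

            00 0A   01 00 00   0F

        i.e. a two-byte action word, one three-byte elementary "noun"
        token, and the ENDIT byte.                                           */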

    int j, bytecode, mark; int32 wordcode;
    int grammar_token, slash_mode, last_was_slash;
    int reverse_action, TOKEN_SIZE;
    debug_location_beginning beginning_debug_location =
        get_token_location_beginning();

    get_next_token();
    if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
    {   discard_token_location(beginning_debug_location);
        return FALSE;
    }
    if (!((token_type == SEP_TT) && (token_value == TIMES_SEP)))
    {   discard_token_location(beginning_debug_location);
        ebf_error("'*' divider", token_text);
        panic_mode_error_recovery();
        return FALSE;
    }

    /*  Have we run out of lines or token space?  */

    if (line >= MAX_LINES_PER_VERB)
    {   discard_token_location(beginning_debug_location);
        error("Too many lines of grammar for verb. This maximum is built \
into Inform, so suggest rewriting grammar using general parsing routines");
        return(FALSE);
    }

    /*  Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long  */
    /*  In Glulx, that's 5*32 + 4 = 164 bytes */

    mark = grammar_lines_top;
    if (!glulx_mode) {
        if (mark + 100 >= MAX_LINESPACE)
        {   discard_token_location(beginning_debug_location);
            memoryerror("MAX_LINESPACE", MAX_LINESPACE);
        }
    }
    else {
        if (mark + 165 >= MAX_LINESPACE)
        {   discard_token_location(beginning_debug_location);
            memoryerror("MAX_LINESPACE", MAX_LINESPACE);
        }
    }

    Inform_verbs[verbnum].l[line] = mark;

    if (!glulx_mode) {
        mark = mark + 2;
        TOKEN_SIZE = 3;
    }
    else {
        mark = mark + 3;
        TOKEN_SIZE = 5;
    }

    grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE;
    no_grammar_lines++;

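    /*  GV2 token bytecodes laid down by the loop below: 0x01 = elementary
        token (wordcode 0-9 selects noun, held, multi, multiheld,
        multiexcept, multiinside, creature, special, number or topic),
        0x42 = preposition (wordcode is its dictionary address),
        0x83 = noun=Routine, 0x85 = scope=Routine, 0x86 = general parsing
        routine, 0x04 = attribute; the 0x10 and 0x20 bits mark members of
        a '/'-separated preposition alternation.                             */
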
    do
    {   get_next_token();
        bytecode = 0; wordcode = 0;
        if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
        {   discard_token_location(beginning_debug_location);
            ebf_error("'->' clause", token_text);
            return FALSE;
        }
        if ((token_type == SEP_TT) && (token_value == ARROW_SEP))
        {   if (last_was_slash && (grammar_token>0))
                ebf_error("grammar token", token_text);
            break;
        }

        if (!last_was_slash) slash_mode = FALSE;
        if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP))
        {   if (grammar_version_number == 1)
                error("'/' can only be used with Library 6/3 or later");
            if (last_was_slash)
                ebf_error("grammar token or '->'", token_text);
            else
            {   last_was_slash = TRUE;
                slash_mode = TRUE;
                if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2)
                    error("'/' can only be applied to prepositions");
                grammar_lines[mark-TOKEN_SIZE] |= 0x20;
                continue;
            }
        }
        else last_was_slash = FALSE;

        if ((token_type == DQ_TT) || (token_type == SQ_TT))
        {    if (grammar_version_number == 1)
                 bytecode = make_adjective(token_text);
             else
             {   bytecode = 0x42;
                 wordcode = dictionary_add(token_text, 8, 0, 0);
             }
        }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK))
             {   get_next_token();
                 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
                 {
                     /*  noun = <routine>                                    */

                     get_next_token();
                     if ((token_type != SYMBOL_TT)
                         || (stypes[token_value] != ROUTINE_T))
                     {   discard_token_location(beginning_debug_location);
                         ebf_error("routine name after 'noun='", token_text);
                         panic_mode_error_recovery();
                         return FALSE;
                     }
                     if (grammar_version_number == 1)
                         bytecode
                             = 16 + make_parsing_routine(svals[token_value]);
                     else
                     {   bytecode = 0x83;
                         wordcode = svals[token_value];
                     }
                     sflags[token_value] |= USED_SFLAG;
                 }
                 else
                 {   put_token_back();
                     if (grammar_version_number == 1) bytecode=0;
                     else { bytecode = 1; wordcode = 0; }
                 }
             }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK))
             {   if (grammar_version_number==1) bytecode=1;
                 else { bytecode=1; wordcode=1; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK))
             {   if (grammar_version_number==1) bytecode=2;
                 else { bytecode=1; wordcode=2; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK))
             {   if (grammar_version_number==1) bytecode=3;
                 else { bytecode=1; wordcode=3; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK))
             {   if (grammar_version_number==1) bytecode=4;
                 else { bytecode=1; wordcode=4; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK))
             {   if (grammar_version_number==1) bytecode=5;
                 else { bytecode=1; wordcode=5; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK))
             {   if (grammar_version_number==1) bytecode=6;
                 else { bytecode=1; wordcode=6; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK))
             {   if (grammar_version_number==1) bytecode=7;
                 else { bytecode=1; wordcode=7; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK))
             {   if (grammar_version_number==1) bytecode=8;
                 else { bytecode=1; wordcode=8; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK))
             {   if (grammar_version_number==1)
                     error("The 'topic' token is only available if you \
are using Library 6/3 or later");
                 else { bytecode=1; wordcode=9; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK))
             {
                 /*  scope = <routine> */

                 get_next_token();
                 if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP)))
                 {   discard_token_location(beginning_debug_location);
                     ebf_error("'=' after 'scope'", token_text);
                     panic_mode_error_recovery();
                     return FALSE;
                 }

                 get_next_token();
                 if ((token_type != SYMBOL_TT)
                     || (stypes[token_value] != ROUTINE_T))
                 {   discard_token_location(beginning_debug_location);
                     ebf_error("routine name after 'scope='", token_text);
                     panic_mode_error_recovery();
                     return FALSE;
                 }

                 if (grammar_version_number == 1)
                     bytecode = 80 +
                         make_parsing_routine(svals[token_value]);
                 else { bytecode = 0x85; wordcode = svals[token_value]; }
                 sflags[token_value] |= USED_SFLAG;
             }
        else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
             {   discard_token_location(beginning_debug_location);
                 error("'=' is only legal here as 'noun=Routine'");
                 panic_mode_error_recovery();
                 return FALSE;
             }
        else {   /*  <attribute>  or  <general-parsing-routine>  tokens      */

                 if ((token_type != SYMBOL_TT)
                     || ((stypes[token_value] != ATTRIBUTE_T)
                         && (stypes[token_value] != ROUTINE_T)))
                 {   discard_token_location(beginning_debug_location);
                     error_named("No such grammar token as", token_text);
                     panic_mode_error_recovery();
                     return FALSE;
                 }
                 if (stypes[token_value]==ATTRIBUTE_T)
                 {   if (grammar_version_number == 1)
                         bytecode = 128 + svals[token_value];
                     else { bytecode = 4; wordcode = svals[token_value]; }
                 }
                 else
                 {   if (grammar_version_number == 1)
                         bytecode = 48 +
                             make_parsing_routine(svals[token_value]);
                     else { bytecode = 0x86; wordcode = svals[token_value]; }
                 }
                 sflags[token_value] |= USED_SFLAG;
             }

        grammar_token++; no_grammar_tokens++;
        if ((grammar_version_number == 1) && (grammar_token > 6))
        {   if (grammar_token == 7)
                warning("Grammar line cut short: you can only have up to 6 \
tokens in any line (unless you're compiling with library 6/3 or later)");
        }
        else
        {   if (slash_mode)
            {   if (bytecode != 0x42)
                    error("'/' can only be applied to prepositions");
                bytecode |= 0x10;
            }
            grammar_lines[mark++] = bytecode;
            if (!glulx_mode) {
                grammar_lines[mark++] = wordcode/256;
                grammar_lines[mark++] = wordcode%256;
            }
            else {
                grammar_lines[mark++] = ((wordcode >> 24) & 0xFF);
                grammar_lines[mark++] = ((wordcode >> 16) & 0xFF);
                grammar_lines[mark++] = ((wordcode >> 8) & 0xFF);
                grammar_lines[mark++] = ((wordcode) & 0xFF);
            }
        }

    } while (TRUE);

    grammar_lines[mark++] = 15;
    grammar_lines_top = mark;

    dont_enter_into_symbol_table = TRUE;
    get_next_token();
    dont_enter_into_symbol_table = FALSE;

    if (token_type != DQ_TT)
    {   discard_token_location(beginning_debug_location);
        ebf_error("name of new or existing action", token_text);
        panic_mode_error_recovery();
        return FALSE;
    }

    {   assembly_operand AO = action_of_name(token_text);
        j = AO.value;
        if (j >= ((grammar_version_number==1)?256:4096))
            error_named("This is a fake action, not a real one:", token_text);
    }

    reverse_action = FALSE;
    get_next_token();
    if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK))
    {   if (grammar_version_number == 1)
            error("'reverse' actions can only be used with \
Library 6/3 or later");
        reverse_action = TRUE;
    }
    else put_token_back();

    mark = Inform_verbs[verbnum].l[line];

    if (debugfile_switch)
    {   debug_file_printf("<table-entry>");
        debug_file_printf("<type>grammar line</type>");
        debug_file_printf("<address>");
        write_debug_grammar_backpatch(mark);
        debug_file_printf("</address>");
        debug_file_printf("<end-address>");
        write_debug_grammar_backpatch(grammar_lines_top);
        debug_file_printf("</end-address>");
        write_debug_locations
            (get_token_location_end(beginning_debug_location));
        debug_file_printf("</table-entry>");
    }

    if (!glulx_mode) {
        if (reverse_action)
            j = j + 0x400;
        grammar_lines[mark++] = j/256;
        grammar_lines[mark++] = j%256;
    }
    else {
        grammar_lines[mark++] = ((j >> 8) & 0xFF);
        grammar_lines[mark++] = ((j) & 0xFF);
        grammar_lines[mark++] = (reverse_action ? 1 : 0);
    }

    return TRUE;
}

/* ------------------------------------------------------------------------- */
/*   The Verb directive:                                                     */
/*                                                                           */
/*       Verb [meta] "word-1" ... "word-n" | = "existing-English-verb"       */
/*                                         | <grammar-line-1> ... <g-line-n> */
/*                                                                           */
/* ------------------------------------------------------------------------- */
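/*   e.g. (an illustrative declaration, assuming an action Take, with a      */
/*   TakeSub routine, is defined elsewhere):                                 */
/*                                                                           */
/*       Verb 'take' 'get'                                                   */
/*           * multi -> Take;                                                */
/* ------------------------------------------------------------------------- */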

extern void make_verb(void)
{
    /*  Parse an entire Verb ... directive.                                  */

    int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE;

    char *English_verbs_given[32]; int no_given = 0, i;

    directive_keywords.enabled = TRUE;

    get_next_token();

    if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK))
    {   meta_verb_flag = TRUE;
        get_next_token();
    }

    while ((token_type == DQ_TT) || (token_type == SQ_TT))
    {   English_verbs_given[no_given++] = token_text;
        get_next_token();
    }

    if (no_given == 0)
    {   ebf_error("English verb in quotes", token_text);
        panic_mode_error_recovery(); return;
    }

    if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
    {   verb_equals_form = TRUE;
        get_next_token();
        Inform_verb = get_verb();
        if (Inform_verb == -1) return;
        get_next_token();
        if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)))
            ebf_error("';' after English verb", token_text);
    }
    else
    {   Inform_verb = no_Inform_verbs;
        if (no_Inform_verbs == MAX_VERBS)
            memoryerror("MAX_VERBS",MAX_VERBS);
    }

    for (i=0; i<no_given; i++)
    {   dictionary_add(English_verbs_given[i],
            0x41 + ((meta_verb_flag)?0x02:0x00),
            (glulx_mode)?(0xffff-Inform_verb):(0xff-Inform_verb), 0);
        register_verb(English_verbs_given[i], Inform_verb);
    }

    if (!verb_equals_form)
    {   int lines = 0;
        put_token_back();
        while (grammar_line(no_Inform_verbs, lines++)) ;
        Inform_verbs[no_Inform_verbs++].lines = --lines;
    }

    directive_keywords.enabled = FALSE;
}

/* ------------------------------------------------------------------------- */
/*   The Extend directive:                                                   */
/*                                                                           */
/*      Extend | only "verb-1" ... "verb-n"  |             <grammar-lines>   */
/*             | "verb"                      | "replace"                     */
/*                                           | "first"                       */
/*                                           | "last"                        */
/*                                                                           */
/* ------------------------------------------------------------------------- */
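/*   e.g. (illustrative, assuming grammar for "take" and an action           */
/*   PickFruit already exist):                                               */
/*                                                                           */
/*       Extend 'take' first                                                 */
/*           * 'fruit' -> PickFruit;                                         */
/* ------------------------------------------------------------------------- */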

#define EXTEND_REPLACE 1
#define EXTEND_FIRST   2
#define EXTEND_LAST    3

extern void extend_verb(void)
{
    /*  Parse an entire Extend ... directive.                                */

    int Inform_verb = -1, k, l, lines, extend_mode;

    directive_keywords.enabled = TRUE;
    directives.enabled = FALSE;

    get_next_token();
    if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK))
    {   l = -1;
        if (no_Inform_verbs == MAX_VERBS)
            memoryerror("MAX_VERBS", MAX_VERBS);
        while (get_next_token(),
               ((token_type == DQ_TT) || (token_type == SQ_TT)))
        {   Inform_verb = get_verb();
            if (Inform_verb == -1) return;
            if ((l!=-1) && (Inform_verb!=l))
              warning_named("Verb disagrees with previous verbs:", token_text);
            l = Inform_verb;
            dictionary_set_verb_number(token_text,
              (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs));
            /* make call to renumber verb in English_verb_list too */
            if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1)
              warning_named("Verb to extend not found in English_verb_list:",
                 token_text);
        }

        /*  Copy the old Inform-verb into a new one which the list of
            English-verbs given have had their dictionary entries modified
            to point to                                                      */

        Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb];
        Inform_verb = no_Inform_verbs++;
    }
    else
    {   Inform_verb = get_verb();
        if (Inform_verb == -1) return;
        get_next_token();
    }

    /*  Inform_verb now contains the number of the Inform-verb to extend...  */

    extend_mode = EXTEND_LAST;
    if ((token_type == SEP_TT) && (token_value == TIMES_SEP))
        put_token_back();
    else
    {   extend_mode = 0;
        if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK))
            extend_mode = EXTEND_REPLACE;
        if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK))
            extend_mode = EXTEND_FIRST;
        if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK))
            extend_mode = EXTEND_LAST;

        if (extend_mode==0)
        {   ebf_error("'replace', 'last', 'first' or '*'", token_text);
            extend_mode = EXTEND_LAST;
        }
    }

    l = Inform_verbs[Inform_verb].lines;
    lines = 0;
    if (extend_mode == EXTEND_LAST) lines=l;
    do
    {   if (extend_mode == EXTEND_FIRST)
            for (k=l; k>0; k--)
                 Inform_verbs[Inform_verb].l[k+lines]
                     = Inform_verbs[Inform_verb].l[k-1+lines];
    } while (grammar_line(Inform_verb, lines++));

    if (extend_mode == EXTEND_FIRST)
    {   Inform_verbs[Inform_verb].lines = l+lines-1;
        for (k=0; k<l; k++)
            Inform_verbs[Inform_verb].l[k+lines-1]
                = Inform_verbs[Inform_verb].l[k+lines];
    }
    else Inform_verbs[Inform_verb].lines = --lines;

    directive_keywords.enabled = FALSE;
    directives.enabled = TRUE;
}

/* ========================================================================= */
/*   Data structure management routines                                      */
/* ------------------------------------------------------------------------- */

extern void init_verbs_vars(void)
{
    no_fake_actions = 0;
    no_actions = 0;
    no_grammar_lines = 0;
    no_grammar_tokens = 0;
    English_verb_list_size = 0;

    Inform_verbs = NULL;
    action_byte_offset = NULL;
    grammar_token_routine = NULL;
    adjectives = NULL;
    adjective_sort_code = NULL;
    English_verb_list = NULL;

    if (!glulx_mode)
        grammar_version_number = 1;
    else
        grammar_version_number = 2;
}

extern void verbs_begin_pass(void)
{
    no_Inform_verbs=0; no_adjectives=0;
    no_grammar_token_routines=0;
    no_actions=0;

    no_fake_actions=0;
    grammar_lines_top = 0;
}

extern void verbs_allocate_arrays(void)
{
    Inform_verbs          = my_calloc(sizeof(verbt),   MAX_VERBS, "verbs");
    grammar_lines         = my_malloc(MAX_LINESPACE, "grammar lines");
    action_byte_offset    = my_calloc(sizeof(int32),   MAX_ACTIONS, "actions");
    action_symbol         = my_calloc(sizeof(int32),   MAX_ACTIONS,
                                "action symbols");
    grammar_token_routine = my_calloc(sizeof(int32),   MAX_ACTIONS,
                                "grammar token routines");
    adjectives            = my_calloc(sizeof(int32),   MAX_ADJECTIVES,
                                "adjectives");
    adjective_sort_code   = my_calloc(DICT_WORD_BYTES, MAX_ADJECTIVES,
                                "adjective sort codes");

    English_verb_list     = my_malloc(MAX_VERBSPACE, "register of verbs");
    English_verb_list_top = English_verb_list;
}

extern void verbs_free_arrays(void)
{
    my_free(&Inform_verbs, "verbs");
    my_free(&grammar_lines, "grammar lines");
    my_free(&action_byte_offset, "actions");
    my_free(&action_symbol, "action symbols");
    my_free(&grammar_token_routine, "grammar token routines");
    my_free(&adjectives, "adjectives");
    my_free(&adjective_sort_code, "adjective sort codes");
    my_free(&English_verb_list, "register of verbs");
}

/* ========================================================================= */