SF.net SVN: geany: [1181] trunk

eht16 at users.sourceforge.net
Sat Jan 13 20:16:09 UTC 2007


Revision: 1181
          http://svn.sourceforge.net/geany/?rev=1181&view=rev
Author:   eht16
Date:     2007-01-13 12:16:09 -0800 (Sat, 13 Jan 2007)

Log Message:
-----------
Added new filetypes Javascript and Lua.

Modified Paths:
--------------
    trunk/ChangeLog
    trunk/data/filetype_extensions.conf
    trunk/scintilla/KeyWords.cxx
    trunk/scintilla/Makefile.am
    trunk/scintilla/makefile.win32
    trunk/src/filetypes.c
    trunk/src/filetypes.h
    trunk/src/highlighting.c
    trunk/src/highlighting.h
    trunk/src/sci_cb.c
    trunk/src/templates.c
    trunk/tagmanager/Makefile.am
    trunk/tagmanager/makefile.win32
    trunk/tagmanager/parsers.h

Added Paths:
-----------
    trunk/data/filetypes.javascript
    trunk/data/filetypes.lua
    trunk/scintilla/LexLua.cxx
    trunk/tagmanager/js.c
    trunk/tagmanager/lua.c

Modified: trunk/ChangeLog
===================================================================
--- trunk/ChangeLog	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/ChangeLog	2007-01-13 20:16:09 UTC (rev 1181)
@@ -1,3 +1,14 @@
+2007-01-13  Enrico Tröger  <enrico.troeger at uvena.de>
+
+ * data/filetype_extensions.conf, data/filetypes.javascript,
+   data/filetypes.lua, scintilla/KeyWords.cxx, scintilla/LexLua.cxx,
+   scintilla/Makefile.am, scintilla/makefile.win32, src/filetypes.c,
+   src/filetypes.h, src/highlighting.c, src/highlighting.h,
+   src/sci_cb.c, src/templates.c, tagmanager/js.c, tagmanager/lua.c,
+   tagmanager/Makefile.am, tagmanager/makefile.win32,
+   tagmanager/parsers.h: Added new filetypes Javascript and Lua.
+
+
 2007-01-13  Nick Treleaven  <nick.treleaven at btinternet.com>
 
  * src/main.c, src/socket.c, po/POTFILES.in:

Modified: trunk/data/filetype_extensions.conf
===================================================================
--- trunk/data/filetype_extensions.conf	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/data/filetype_extensions.conf	2007-01-13 20:16:09 UTC (rev 1181)
@@ -12,9 +12,11 @@
 CAML=*.ml;*.mli;
 Perl=*.pl;*.perl;*.pm;*.agi;
 PHP=*.php;*.php3;*.php4;*.php5;*.phtml;
+Javascript=*.js;*.jsp;
 Python=*.py;*.pyw;
 Ruby=*.rb;*.rhtml;
 Tcl=*.tcl;*.tk;*.wish;
+Lua=*.lua;
 Ferite=*.fe;
 Sh=*.sh;configure;configure.in;configure.in.in;configure.ac;*.ksh;*.zsh;
 Make=*.mak;*.mk;GNUmakefile;makefile;Makefile;makefile.*;Makefile.*;

Added: trunk/data/filetypes.javascript
===================================================================
--- trunk/data/filetypes.javascript	                        (rev 0)
+++ trunk/data/filetypes.javascript	2007-01-13 20:16:09 UTC (rev 1181)
@@ -0,0 +1,52 @@
+# For complete documentation of this file, please see Geany's main documentation
+[styling]
+# foreground;background;bold;italic
+default=0x000000;0xffffff;false;false
+comment=0xd00000;0xffffff;false;false
+commentline=0xd00000;0xffffff;false;false
+commentdoc=0x3f5fbf;0xffffff;false;false
+number=0x007f00;0xffffff;false;false
+word=0x00007f;0xffffff;true;false
+word2=0x991111;0xffffff;true;false
+string=0xff901e;0xffffff;false;false
+character=0xff901e;0xffffff;false;false
+uuid=0x404080;0xffffff;false;false
+preprocessor=0x007F7F;0xffffff;false;false
+operator=0x301010;0xffffff;false;false
+identifier=0x000000;0xffffff;false;false
+stringeol=0x000000;0xe0c0e0;false;false
+verbatim=0x101030;0xffffff;false;false
+regex=0x105090;0xffffff;false;false
+commentlinedoc=0x3f5fbf;0xffffff;true;false
+commentdockeyword=0x3f5fbf;0xffffff;true;true
+commentdockeyworderror=0x3f5fbf;0xffffff;false;false
+globalclass=0x0000d0;0xffffff;true;false
+
+[keywords]
+# all items must be in one line
+primary=abstract boolean break byte case catch char class const continue debugger default delete do double else enum export extends final finally float for function goto if implements import in instanceof int interface long native new package private protected public return short static super switch synchronized this throw throws transient try typeof var void volatile while with
+
+[settings]
+# the following characters are those which a "word" can contain, see documentation
+#wordchars=_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+
+# if only a single comment char is supported, like # in this file, leave comment_close blank
+comment_open=//
+comment_close=
+
+# set to false if a comment character/string should start at column 0 of a line; true uses the
+# indentation of the line, e.g. setting to true causes the following on pressing CTRL+d
+	#command_example();
+# setting to false would generate this
+#	command_example();
+# This setting works only for single line comments
+comment_use_indent=true
+
+[build_settings]
+# %f will be replaced by the complete filename
+# %e will be replaced by the filename without extension
+# (use only one of them at a time)
+compiler=
+run=
+
+

Added: trunk/data/filetypes.lua
===================================================================
--- trunk/data/filetypes.lua	                        (rev 0)
+++ trunk/data/filetypes.lua	2007-01-13 20:16:09 UTC (rev 1181)
@@ -0,0 +1,65 @@
+# For complete documentation of this file, please see Geany's main documentation
+[styling]
+# foreground;background;bold;italic
+default=0x000000;0xffffff;false;false
+comment=0xd00000;0xffffff;false;false
+commentline=0xd00000;0xffffff;false;false
+commentdoc=0x3f5fbf;0xffffff;true;false
+number=0x007f00;0xffffff;false;false
+word=0x00007f;0xffffff;true;false
+string=0xff901e;0xffffff;false;false
+character=0x008000;0xffffff;false;false
+literalstring=0x008020;0xffffff;false;false
+preprocessor=0x007f7f;0xffffff;false;false
+operator=0x301010;0xffffff;false;false
+identifier=0x000000;0xffffff;false;false
+stringeol=0x000000;0xe0c0e0;false;false
+function_basic=0x991111;0xffffff;false;false
+function_other=0x690000;0xffffff;false;false
+coroutines=0x66005c;0xffffff;false;false
+word5=0x7979ff;0xffffff;false;false
+word6=0xad00ff;0xffffff;false;false
+word7=0x03D000;0xffffff;false;false
+word8=0xff7600;0xffffff;false;false
+
+[keywords]
+# all items must be in one line
+keywords=and break do else elseif end false for function if in local nil not or repeat return then true until while
+# Basic functions
+function_basic=_ALERT assert call collectgarbage coroutine debug dofile dostring error _ERRORMESSAGE foreach foreachi _G gcinfo getfenv getmetatable getn globals _INPUT io ipairs load loadfile loadlib loadstring math module newtype next os _OUTPUT pairs pcall print _PROMPT rawequal rawget rawset require select setfenv setmetatable sort _STDERR _STDIN _STDOUT string table tinsert tonumber tostring tremove type unpack _VERSION xpcall
+# String, (table) & math functions
+function_other=abs acos asin atan atan2 ceil cos deg exp floor format frexp gsub ldexp log log10 math.abs math.acos math.asin math.atan math.atan2 math.ceil math.cos math.cosh math.deg math.exp math.floor math.fmod math.frexp math.huge math.ldexp math.log math.log10 math.max math.min math.mod math.modf math.pi math.pow math.rad math.random math.randomseed math.sin math.sinh math.sqrt math.tan math.tanh max min mod rad random randomseed sin sqrt strbyte strchar strfind string.byte string.char string.dump string.find string.format string.gfind string.gmatch string.gsub string.len string.lower string.match string.rep string.reverse string.sub string.upper strlen strlower strrep strsub strupper table.concat table.foreach table.foreachi table.getn table.insert table.maxn table.remove table.setn table.sort tan
+# (coroutines), I/O & system facilities
+coroutines=appendto clock closefile coroutine.create coroutine.resume coroutine.running coroutine.status coroutine.wrap coroutine.yield date difftime execute exit flush getenv io.close io.flush io.input io.lines io.open io.output io.popen io.read io.stderr io.stdin io.stdout io.tmpfile io.type io.write openfile os.clock os.date os.difftime os.execute os.exit os.getenv os.remove os.rename os.setlocale os.time os.tmpname package.cpath package.loaded package.loadlib package.path package.preload package.seeall read readfrom remove rename seek setlocale time tmpfile tmpname write writeto
+# user definable keywords
+user1=
+user2=
+user3=
+user4=
+
+[settings]
+# the following characters are those which a "word" can contain, see documentation
+#wordchars=_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+
+# if only a single comment char is supported, like # in this file, leave comment_close blank
+comment_open=--
+comment_close=
+# this is an alternative way which uses multiline comments
+#comment_open=--[[
+#comment_close=]]--
+
+# set to false if a comment character/string should start at column 0 of a line; true uses the
+# indentation of the line, e.g. setting to true causes the following on pressing CTRL+d
+	#command_example();
+# setting to false would generate this
+#	command_example();
+# This setting works only for single line comments
+comment_use_indent=true
+
+[build_settings]
+# %f will be replaced by the complete filename
+# %e will be replaced by the filename without extension
+# (use only one of them at a time)
+compiler=
+run_cmd=lua "%f"
+
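
The comment settings in filetypes.lua map directly onto Lua's own syntax: "--" opens a single-line comment (the default comment_open), while the commented-out alternative pair corresponds to Lua block comments. A minimal Lua illustration of both forms (example only, not part of this commit):

	local answer = 42    -- a single-line comment, as toggled with comment_open=--
	--[[ a block comment, matching the alternative
	     comment_open/comment_close pair ]]
	print(answer)        -- run_cmd=lua "%f" runs the saved file with the lua interpreter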

Modified: trunk/scintilla/KeyWords.cxx
===================================================================
--- trunk/scintilla/KeyWords.cxx	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/scintilla/KeyWords.cxx	2007-01-13 20:16:09 UTC (rev 1181)
@@ -167,6 +167,7 @@
 	LINK_LEXER(lmTCL);
 	LINK_LEXER(lmVHDL);
 	LINK_LEXER(lmD);
+	LINK_LEXER(lmLua);
 
 //--Autogenerated -- end of automatically generated section
 

Added: trunk/scintilla/LexLua.cxx
===================================================================
--- trunk/scintilla/LexLua.cxx	                        (rev 0)
+++ trunk/scintilla/LexLua.cxx	2007-01-13 20:16:09 UTC (rev 1181)
@@ -0,0 +1,360 @@
+// Scintilla source code edit control
+/** @file LexLua.cxx
+ ** Lexer for Lua language.
+ **
+ ** Written by Paul Winwood.
+ ** Folder by Alexey Yutkin.
+ ** Modified by Marcos E. Wurzius & Philippe Lhoste
+ **/
+
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "Platform.h"
+
+#include "PropSet.h"
+#include "Accessor.h"
+#include "StyleContext.h"
+#include "KeyWords.h"
+#include "Scintilla.h"
+#include "SciLexer.h"
+
+// Extended to accept accented characters
+static inline bool IsAWordChar(int ch) {
+	return ch >= 0x80 ||
+	       (isalnum(ch) || ch == '.' || ch == '_');
+}
+
+static inline bool IsAWordStart(int ch) {
+	return ch >= 0x80 ||
+	       (isalpha(ch) || ch == '_');
+}
+
+static inline bool IsANumberChar(int ch) {
+	// Not exactly following number definition (several dots are seen as OK, etc.)
+	// but probably enough in most cases.
+	return (ch < 0x80) &&
+	        (isdigit(ch) || toupper(ch) == 'E' ||
+	        ch == '.' || ch == '-' || ch == '+' ||
+	        (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F'));
+}
+
+static inline bool IsLuaOperator(int ch) {
+	if (ch >= 0x80 || isalnum(ch)) {
+		return false;
+	}
+	// '.' left out as it is used to make up numbers
+	if (ch == '*' || ch == '/' || ch == '-' || ch == '+' ||
+		ch == '(' || ch == ')' || ch == '=' ||
+		ch == '{' || ch == '}' || ch == '~' ||
+		ch == '[' || ch == ']' || ch == ';' ||
+		ch == '<' || ch == '>' || ch == ',' ||
+		ch == '.' || ch == '^' || ch == '%' || ch == ':' ||
+		ch == '#') {
+		return true;
+	}
+	return false;
+}
+
+// Test for [=[ ... ]=] delimiters: returns 0 if it's only a [ or ],
+// returns 1 for [[ or ]], and >=2 for [=[ or ]=] and so on.
+// The maximum number of '=' characters allowed is 254.
+static int LongDelimCheck(StyleContext &sc) {
+	int sep = 1;
+	while (sc.GetRelative(sep) == '=' && sep < 0xFF)
+		sep++;
+	if (sc.GetRelative(sep) == sc.ch)
+		return sep;
+	return 0;
+}
+
+static void ColouriseLuaDoc(
+	unsigned int startPos,
+	int length,
+	int initStyle,
+	WordList *keywordlists[],
+	Accessor &styler) {
+
+	WordList &keywords = *keywordlists[0];
+	WordList &keywords2 = *keywordlists[1];
+	WordList &keywords3 = *keywordlists[2];
+	WordList &keywords4 = *keywordlists[3];
+	WordList &keywords5 = *keywordlists[4];
+	WordList &keywords6 = *keywordlists[5];
+	WordList &keywords7 = *keywordlists[6];
+	WordList &keywords8 = *keywordlists[7];
+
+	int currentLine = styler.GetLine(startPos);
+	// Initialize long string [[ ... ]] or block comment --[[ ... ]] nesting level,
+	// if we are inside such a string. Block comment was introduced in Lua 5.0,
+	// blocks with separators [=[ ... ]=] in Lua 5.1.
+	int nestLevel = 0;
+	int sepCount = 0;
+	if (initStyle == SCE_LUA_LITERALSTRING || initStyle == SCE_LUA_COMMENT) {
+		int lineState = styler.GetLineState(currentLine - 1);
+		nestLevel = lineState >> 8;
+		sepCount = lineState & 0xFF;
+	}
+
+	// Do not leak onto next line
+	if (initStyle == SCE_LUA_STRINGEOL || initStyle == SCE_LUA_COMMENTLINE || initStyle == SCE_LUA_PREPROCESSOR) {
+		initStyle = SCE_LUA_DEFAULT;
+	}
+
+	StyleContext sc(startPos, length, initStyle, styler);
+	if (startPos == 0 && sc.ch == '#') {
+		// shebang line: # is a comment only if it is the first char of the script
+		sc.SetState(SCE_LUA_COMMENTLINE);
+	}
+	for (; sc.More(); sc.Forward()) {
+		if (sc.atLineEnd) {
+			// Update the line state, so it can be seen by next line
+			currentLine = styler.GetLine(sc.currentPos);
+			switch (sc.state) {
+			case SCE_LUA_LITERALSTRING:
+			case SCE_LUA_COMMENT:
+				// Inside a literal string or block comment, we set the line state
+				styler.SetLineState(currentLine, (nestLevel << 8) | sepCount);
+				break;
+			default:
+				// Reset the line state
+				styler.SetLineState(currentLine, 0);
+				break;
+			}
+		}
+		if (sc.atLineStart && (sc.state == SCE_LUA_STRING)) {
+			// Prevent SCE_LUA_STRINGEOL from leaking back to previous line
+			sc.SetState(SCE_LUA_STRING);
+		}
+
+		// Handle string line continuation
+		if ((sc.state == SCE_LUA_STRING || sc.state == SCE_LUA_CHARACTER) &&
+		        sc.ch == '\\') {
+			if (sc.chNext == '\n' || sc.chNext == '\r') {
+				sc.Forward();
+				if (sc.ch == '\r' && sc.chNext == '\n') {
+					sc.Forward();
+				}
+				continue;
+			}
+		}
+
+		// Determine if the current state should terminate.
+		if (sc.state == SCE_LUA_OPERATOR) {
+			sc.SetState(SCE_LUA_DEFAULT);
+		} else if (sc.state == SCE_LUA_NUMBER) {
+			// We stop the number definition on non-numerical non-dot non-eE non-sign non-hexdigit char
+			if (!IsANumberChar(sc.ch)) {
+				sc.SetState(SCE_LUA_DEFAULT);
+			} else if (sc.ch == '-' || sc.ch == '+') {
+				if (sc.chPrev != 'E' && sc.chPrev != 'e')
+					sc.SetState(SCE_LUA_DEFAULT);
+			}
+		} else if (sc.state == SCE_LUA_IDENTIFIER) {
+			if (!IsAWordChar(sc.ch) || sc.Match('.', '.')) {
+				char s[100];
+				sc.GetCurrent(s, sizeof(s));
+				if (keywords.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD);
+				} else if (keywords2.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD2);
+				} else if (keywords3.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD3);
+				} else if (keywords4.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD4);
+				} else if (keywords5.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD5);
+				} else if (keywords6.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD6);
+				} else if (keywords7.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD7);
+				} else if (keywords8.InList(s)) {
+					sc.ChangeState(SCE_LUA_WORD8);
+				}
+				sc.SetState(SCE_LUA_DEFAULT);
+			}
+		} else if (sc.state == SCE_LUA_COMMENTLINE || sc.state == SCE_LUA_PREPROCESSOR) {
+			if (sc.atLineEnd) {
+				sc.ForwardSetState(SCE_LUA_DEFAULT);
+			}
+		} else if (sc.state == SCE_LUA_STRING) {
+			if (sc.ch == '\\') {
+				if (sc.chNext == '\"' || sc.chNext == '\'' || sc.chNext == '\\') {
+					sc.Forward();
+				}
+			} else if (sc.ch == '\"') {
+				sc.ForwardSetState(SCE_LUA_DEFAULT);
+			} else if (sc.atLineEnd) {
+				sc.ChangeState(SCE_LUA_STRINGEOL);
+				sc.ForwardSetState(SCE_LUA_DEFAULT);
+			}
+		} else if (sc.state == SCE_LUA_CHARACTER) {
+			if (sc.ch == '\\') {
+				if (sc.chNext == '\"' || sc.chNext == '\'' || sc.chNext == '\\') {
+					sc.Forward();
+				}
+			} else if (sc.ch == '\'') {
+				sc.ForwardSetState(SCE_LUA_DEFAULT);
+			} else if (sc.atLineEnd) {
+				sc.ChangeState(SCE_LUA_STRINGEOL);
+				sc.ForwardSetState(SCE_LUA_DEFAULT);
+			}
+		} else if (sc.state == SCE_LUA_LITERALSTRING || sc.state == SCE_LUA_COMMENT) {
+			if (sc.ch == '[') {
+				int sep = LongDelimCheck(sc);
+				if (sep == 1 && sepCount == 1) {    // [[-only allowed to nest
+					nestLevel++;
+					sc.Forward();
+				}
+			} else if (sc.ch == ']') {
+				int sep = LongDelimCheck(sc);
+				if (sep == 1 && sepCount == 1) {    // un-nest with ]]-only
+					nestLevel--;
+					sc.Forward();
+					if (nestLevel == 0) {
+						sc.ForwardSetState(SCE_LUA_DEFAULT);
+					}
+				} else if (sep > 1 && sep == sepCount) {   // ]=]-style delim
+					sc.Forward(sep);
+					sc.ForwardSetState(SCE_LUA_DEFAULT);
+				}
+			}
+		}
+
+		// Determine if a new state should be entered.
+		if (sc.state == SCE_LUA_DEFAULT) {
+			if (IsADigit(sc.ch) || (sc.ch == '.' && IsADigit(sc.chNext))) {
+				sc.SetState(SCE_LUA_NUMBER);
+				if (sc.ch == '0' && toupper(sc.chNext) == 'X') {
+					sc.Forward(1);
+				}
+			} else if (IsAWordStart(sc.ch)) {
+				sc.SetState(SCE_LUA_IDENTIFIER);
+			} else if (sc.ch == '\"') {
+				sc.SetState(SCE_LUA_STRING);
+			} else if (sc.ch == '\'') {
+				sc.SetState(SCE_LUA_CHARACTER);
+			} else if (sc.ch == '[') {
+				sepCount = LongDelimCheck(sc);
+				if (sepCount == 0) {
+					sc.SetState(SCE_LUA_OPERATOR);
+				} else {
+					nestLevel = 1;
+					sc.SetState(SCE_LUA_LITERALSTRING);
+					sc.Forward(sepCount);
+				}
+			} else if (sc.Match('-', '-')) {
+				sc.SetState(SCE_LUA_COMMENTLINE);
+				if (sc.Match("--[")) {
+					sc.Forward(2);
+					sepCount = LongDelimCheck(sc);
+					if (sepCount > 0) {
+						nestLevel = 1;
+						sc.ChangeState(SCE_LUA_COMMENT);
+						sc.Forward(sepCount);
+					}
+				} else {
+					sc.Forward();
+				}
+			} else if (sc.atLineStart && sc.Match('$')) {
+				sc.SetState(SCE_LUA_PREPROCESSOR);	// Obsolete since Lua 4.0, but still in old code
+			} else if (IsLuaOperator(static_cast<char>(sc.ch))) {
+				sc.SetState(SCE_LUA_OPERATOR);
+			}
+		}
+	}
+	sc.Complete();
+}
+
+static void FoldLuaDoc(unsigned int startPos, int length, int /* initStyle */, WordList *[],
+                       Accessor &styler) {
+	unsigned int lengthDoc = startPos + length;
+	int visibleChars = 0;
+	int lineCurrent = styler.GetLine(startPos);
+	int levelPrev = styler.LevelAt(lineCurrent) & SC_FOLDLEVELNUMBERMASK;
+	int levelCurrent = levelPrev;
+	char chNext = styler[startPos];
+	bool foldCompact = styler.GetPropertyInt("fold.compact", 1) != 0;
+	int styleNext = styler.StyleAt(startPos);
+	char s[10];
+
+	for (unsigned int i = startPos; i < lengthDoc; i++) {
+		char ch = chNext;
+		chNext = styler.SafeGetCharAt(i + 1);
+		int style = styleNext;
+		styleNext = styler.StyleAt(i + 1);
+		bool atEOL = (ch == '\r' && chNext != '\n') || (ch == '\n');
+		if (style == SCE_LUA_WORD) {
+			if (ch == 'i' || ch == 'd' || ch == 'f' || ch == 'e' || ch == 'r' || ch == 'u') {
+				for (unsigned int j = 0; j < 8; j++) {
+					if (!iswordchar(styler[i + j])) {
+						break;
+					}
+					s[j] = styler[i + j];
+					s[j + 1] = '\0';
+				}
+
+				if ((strcmp(s, "if") == 0) || (strcmp(s, "do") == 0) || (strcmp(s, "function") == 0) || (strcmp(s, "repeat") == 0)) {
+					levelCurrent++;
+				}
+				if ((strcmp(s, "end") == 0) || (strcmp(s, "elseif") == 0) || (strcmp(s, "until") == 0)) {
+					levelCurrent--;
+				}
+			}
+		} else if (style == SCE_LUA_OPERATOR) {
+			if (ch == '{' || ch == '(') {
+				levelCurrent++;
+			} else if (ch == '}' || ch == ')') {
+				levelCurrent--;
+			}
+		} else if (style == SCE_LUA_LITERALSTRING || style == SCE_LUA_COMMENT) {
+			if (ch == '[') {
+				levelCurrent++;
+			} else if (ch == ']') {
+				levelCurrent--;
+			}
+		}
+
+		if (atEOL) {
+			int lev = levelPrev;
+			if (visibleChars == 0 && foldCompact) {
+				lev |= SC_FOLDLEVELWHITEFLAG;
+			}
+			if ((levelCurrent > levelPrev) && (visibleChars > 0)) {
+				lev |= SC_FOLDLEVELHEADERFLAG;
+			}
+			if (lev != styler.LevelAt(lineCurrent)) {
+				styler.SetLevel(lineCurrent, lev);
+			}
+			lineCurrent++;
+			levelPrev = levelCurrent;
+			visibleChars = 0;
+		}
+		if (!isspacechar(ch)) {
+			visibleChars++;
+		}
+	}
+	// Fill in the real level of the next line, keeping the current flags as they will be filled in later
+
+	int flagsNext = styler.LevelAt(lineCurrent) & ~SC_FOLDLEVELNUMBERMASK;
+	styler.SetLevel(lineCurrent, levelPrev | flagsNext);
+}
+
+static const char * const luaWordListDesc[] = {
+	"Keywords",
+	"Basic functions",
+	"String, (table) & math functions",
+	"(coroutines), I/O & system facilities",
+	"user1",
+	"user2",
+	"user3",
+	"user4",
+	0
+};
+
+LexerModule lmLua(SCLEX_LUA, ColouriseLuaDoc, "lua", FoldLuaDoc, luaWordListDesc);
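
LongDelimCheck() and the SCE_LUA_LITERALSTRING/SCE_LUA_COMMENT states above handle Lua's long brackets, where the opening and closing delimiters must carry the same number of '=' signs. A few Lua lines of the kind this lexer is meant to colour (illustration only, not part of the committed diff):

	s1 = [[a plain long string]]
	s2 = [==[ stray ]] and [[ brackets are fine in here ]==]  -- only ]==] closes this one
	--[[ a block comment, introduced in Lua 5.0 ]]
	--[=[ a block comment with a separator, added in Lua 5.1 ]=]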

Modified: trunk/scintilla/Makefile.am
===================================================================
--- trunk/scintilla/Makefile.am	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/scintilla/Makefile.am	2007-01-13 20:16:09 UTC (rev 1181)
@@ -19,6 +19,7 @@
 LexD.cxx \
 LexFortran.cxx \
 LexHTML.cxx \
+LexLua.cxx \
 LexOthers.cxx \
 LexPascal.cxx \
 LexPerl.cxx \

Modified: trunk/scintilla/makefile.win32
===================================================================
--- trunk/scintilla/makefile.win32	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/scintilla/makefile.win32	2007-01-13 20:16:09 UTC (rev 1181)
@@ -62,7 +62,7 @@
 LEXOBJS=\
 LexBash.o LexAsm.o LexCSS.o LexConf.o LexCPP.o LexCrontab.o LexHTML.o LexOthers.o LexPascal.o \
 LexPerl.o LexPython.o LexSQL.o LexCaml.o LexOMS.o LexTCL.o LexRuby.o LexFortran.o LexVHDL.o \
-LexD.o
+LexD.o LexLua.o
 #--Autogenerated -- end of automatically generated section
 
 all: $(COMPLIB)

Modified: trunk/src/filetypes.c
===================================================================
--- trunk/src/filetypes.c	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/src/filetypes.c	2007-01-13 20:16:09 UTC (rev 1181)
@@ -1,7 +1,8 @@
 /*
  *      filetypes.c - this file is part of Geany, a fast and lightweight IDE
  *
- *      Copyright 2006 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2005-2007 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2006-2007 Nick Treleaven <nick.treleaven at btinternet.com>
  *
  *      This program is free software; you can redistribute it and/or modify
  *      it under the terms of the GNU General Public License as published by
@@ -65,7 +66,9 @@
 	FILETYPE_UID_DIFF,		// 23
 	FILETYPE_UID_FERITE,	// 24
 	FILETYPE_UID_HTML,		// 25
-	FILETYPE_UID_VHDL		// 26
+	FILETYPE_UID_VHDL,		// 26
+	FILETYPE_UID_JS,		// 27
+	FILETYPE_UID_LUA		// 28
 };
 
 
@@ -108,6 +111,8 @@
 		case FILETYPE_UID_FERITE:	return filetypes[GEANY_FILETYPES_FERITE];
 		case FILETYPE_UID_HTML:		return filetypes[GEANY_FILETYPES_HTML];
 		case FILETYPE_UID_VHDL:		return filetypes[GEANY_FILETYPES_VHDL];
+		case FILETYPE_UID_JS:		return filetypes[GEANY_FILETYPES_JS];
+		case FILETYPE_UID_LUA:		return filetypes[GEANY_FILETYPES_LUA];
 		default: 					return NULL;
 	}
 }
@@ -338,6 +343,26 @@
 	filetypes_init_build_programs(filetypes[GEANY_FILETYPES_PHP]);
 	filetypes_create_menu_item(filetype_menu, filetypes[GEANY_FILETYPES_PHP]->title, filetypes[GEANY_FILETYPES_PHP]);
 
+#define JAVASCRIPT
+	filetypes[GEANY_FILETYPES_JS] = g_new0(filetype, 1);
+	filetypes[GEANY_FILETYPES_JS]->id = GEANY_FILETYPES_JS;
+	filetypes[GEANY_FILETYPES_JS]->uid = FILETYPE_UID_JS;
+	filetypes[GEANY_FILETYPES_JS]->item = NULL;
+	filetypes[GEANY_FILETYPES_JS]->lang = 23;
+	filetypes[GEANY_FILETYPES_JS]->name = g_strdup("Javascript");
+	filetypes[GEANY_FILETYPES_JS]->has_tags = TRUE;
+	filetypes[GEANY_FILETYPES_JS]->title = g_strdup(_("Javascript source file"));
+	filetypes[GEANY_FILETYPES_JS]->extension = g_strdup("js");
+	filetypes[GEANY_FILETYPES_JS]->pattern = g_new0(gchar*, 3);
+	filetypes[GEANY_FILETYPES_JS]->pattern[0] = g_strdup("*.js");
+	filetypes[GEANY_FILETYPES_JS]->pattern[1] = g_strdup("*.jsp"); /// TODO what is jsp actually?
+	filetypes[GEANY_FILETYPES_JS]->pattern[2] = NULL;
+	filetypes[GEANY_FILETYPES_JS]->style_func_ptr = styleset_js;
+	filetypes[GEANY_FILETYPES_JS]->comment_open = g_strdup("//");
+	filetypes[GEANY_FILETYPES_JS]->comment_close = NULL;
+	filetypes_init_build_programs(filetypes[GEANY_FILETYPES_JS]);
+	filetypes_create_menu_item(filetype_menu, filetypes[GEANY_FILETYPES_JS]->title, filetypes[GEANY_FILETYPES_JS]);
+
 #define PYTHON
 	filetypes[GEANY_FILETYPES_PYTHON] = g_new0(filetype, 1);
 	filetypes[GEANY_FILETYPES_PYTHON]->id = GEANY_FILETYPES_PYTHON;
@@ -399,6 +424,25 @@
 	filetypes_init_build_programs(filetypes[GEANY_FILETYPES_TCL]);
 	filetypes_create_menu_item(filetype_menu, filetypes[GEANY_FILETYPES_TCL]->title, filetypes[GEANY_FILETYPES_TCL]);
 
+#define LUA
+	filetypes[GEANY_FILETYPES_LUA] = g_new0(filetype, 1);
+	filetypes[GEANY_FILETYPES_LUA]->id = GEANY_FILETYPES_LUA;
+	filetypes[GEANY_FILETYPES_LUA]->uid = FILETYPE_UID_LUA;
+	filetypes[GEANY_FILETYPES_LUA]->item = NULL;
+	filetypes[GEANY_FILETYPES_LUA]->lang = 22;
+	filetypes[GEANY_FILETYPES_LUA]->name = g_strdup("Lua");
+	filetypes[GEANY_FILETYPES_LUA]->has_tags = TRUE;
+	filetypes[GEANY_FILETYPES_LUA]->title = g_strdup(_("Lua source file"));
+	filetypes[GEANY_FILETYPES_LUA]->extension = g_strdup("lua");
+	filetypes[GEANY_FILETYPES_LUA]->pattern = g_new0(gchar*, 2);
+	filetypes[GEANY_FILETYPES_LUA]->pattern[0] = g_strdup("*.lua");
+	filetypes[GEANY_FILETYPES_LUA]->pattern[1] = NULL;
+	filetypes[GEANY_FILETYPES_LUA]->style_func_ptr = styleset_lua;
+	filetypes[GEANY_FILETYPES_LUA]->comment_open = g_strdup("--");
+	filetypes[GEANY_FILETYPES_LUA]->comment_close = NULL;
+	filetypes_init_build_programs(filetypes[GEANY_FILETYPES_LUA]);
+	filetypes_create_menu_item(filetype_menu, filetypes[GEANY_FILETYPES_LUA]->title, filetypes[GEANY_FILETYPES_LUA]);
+
 #define FERITE
 	filetypes[GEANY_FILETYPES_FERITE] = g_new0(filetype, 1);
 	filetypes[GEANY_FILETYPES_FERITE]->id = GEANY_FILETYPES_FERITE;

Modified: trunk/src/filetypes.h
===================================================================
--- trunk/src/filetypes.h	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/src/filetypes.h	2007-01-13 20:16:09 UTC (rev 1181)
@@ -1,7 +1,8 @@
 /*
  *      filetypes.h - this file is part of Geany, a fast and lightweight IDE
  *
- *      Copyright 2006 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2005-2007 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2006-2007 Nick Treleaven <nick.treleaven at btinternet.com>
  *
  *      This program is free software; you can redistribute it and/or modify
  *      it under the terms of the GNU General Public License as published by
@@ -44,9 +45,11 @@
 	GEANY_FILETYPES_CAML,
 	GEANY_FILETYPES_PERL,
 	GEANY_FILETYPES_PHP,
+	GEANY_FILETYPES_JS,
 	GEANY_FILETYPES_PYTHON,
 	GEANY_FILETYPES_RUBY,
 	GEANY_FILETYPES_TCL,
+	GEANY_FILETYPES_LUA,
 	GEANY_FILETYPES_FERITE,
 	GEANY_FILETYPES_SH,
 	GEANY_FILETYPES_MAKE,

Modified: trunk/src/highlighting.c
===================================================================
--- trunk/src/highlighting.c	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/src/highlighting.c	2007-01-13 20:16:09 UTC (rev 1181)
@@ -1,7 +1,8 @@
 /*
  *      highlighting.c - this file is part of Geany, a fast and lightweight IDE
  *
- *      Copyright 2006 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2005-2007 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2006-2007 Nick Treleaven <nick.treleaven at btinternet.com>
  *
  *      This program is free software; you can redistribute it and/or modify
  *      it under the terms of the GNU General Public License as published by
@@ -2755,3 +2756,168 @@
 	set_sci_style(sci, SCE_VHDL_USERWORD, GEANY_FILETYPES_VHDL, 14);
 }
 
+
+static void styleset_js_init(void)
+{
+	GKeyFile *config = g_key_file_new();
+	GKeyFile *config_home = g_key_file_new();
+
+	load_keyfiles(config, config_home, GEANY_FILETYPES_JS);
+
+	new_style_array(GEANY_FILETYPES_JS, 20);
+	styleset_c_like_init(config, config_home, GEANY_FILETYPES_JS);
+
+	style_sets[GEANY_FILETYPES_JS].keywords = g_new(gchar*, 2);
+	get_keyfile_keywords(config, config_home, "keywords", "primary", GEANY_FILETYPES_JS, 0, "\
+			abstract boolean break byte case catch char class \
+			const continue debugger default delete do double else enum export extends \
+			final finally float for function goto if implements import in instanceof \
+			int interface long native new package private protected public \
+			return short static super switch synchronized this throw throws \
+			transient try typeof var void volatile while with");
+	style_sets[GEANY_FILETYPES_JS].keywords[1] = NULL;
+
+	get_keyfile_wordchars(config, config_home, &style_sets[GEANY_FILETYPES_JS].wordchars);
+	filetypes_get_config(config, config_home, GEANY_FILETYPES_JS);
+
+	g_key_file_free(config);
+	g_key_file_free(config_home);
+}
+
+
+void styleset_js(ScintillaObject *sci)
+{
+	styleset_common(sci, 5);
+	if (style_sets[GEANY_FILETYPES_JS].styling == NULL) styleset_js_init();
+
+	SSM(sci, SCI_SETWORDCHARS, 0, (sptr_t) style_sets[GEANY_FILETYPES_JS].wordchars);
+	SSM(sci, SCI_AUTOCSETMAXHEIGHT, app->autocompletion_max_height, 0);
+
+	SSM(sci, SCI_SETLEXER, SCLEX_CPP, 0);
+
+	SSM(sci, SCI_SETCONTROLCHARSYMBOL, 32, 0);
+
+	SSM(sci, SCI_SETKEYWORDS, 0, (sptr_t) style_sets[GEANY_FILETYPES_JS].keywords[0]);
+
+	styleset_c_like(sci, GEANY_FILETYPES_JS);
+}
+
+
+static void styleset_lua_init(void)
+{
+	GKeyFile *config = g_key_file_new();
+	GKeyFile *config_home = g_key_file_new();
+
+	load_keyfiles(config, config_home, GEANY_FILETYPES_LUA);
+
+	new_style_array(GEANY_FILETYPES_LUA, 20);
+
+	get_keyfile_hex(config, config_home, "styling", "default", "0x000000", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[0]);
+	get_keyfile_hex(config, config_home, "styling", "comment", "0xd00000", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[1]);
+	get_keyfile_hex(config, config_home, "styling", "commentline", "0xd00000", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[2]);
+	get_keyfile_hex(config, config_home, "styling", "commentdoc", "0x3f5fbf", "0xffffff", "true", &style_sets[GEANY_FILETYPES_LUA].styling[3]);
+	get_keyfile_hex(config, config_home, "styling", "number", "0x007f00", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[4]);
+	get_keyfile_hex(config, config_home, "styling", "word", "0x00007f", "0xffffff", "true", &style_sets[GEANY_FILETYPES_LUA].styling[5]);
+	get_keyfile_hex(config, config_home, "styling", "string", "0xff901e", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[6]);
+	get_keyfile_hex(config, config_home, "styling", "character", "0x008000", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[7]);
+	get_keyfile_hex(config, config_home, "styling", "literalstring", "0x008020", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[8]);
+	get_keyfile_hex(config, config_home, "styling", "preprocessor", "0x007f7f", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[9]);
+	get_keyfile_hex(config, config_home, "styling", "operator", "0x301010", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[10]);
+	get_keyfile_hex(config, config_home, "styling", "identifier", "0x000000", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[11]);
+	get_keyfile_hex(config, config_home, "styling", "stringeol", "0x000000", "0xe0c0e0", "false", &style_sets[GEANY_FILETYPES_LUA].styling[12]);
+	get_keyfile_hex(config, config_home, "styling", "function_basic", "0x991111", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[13]);
+	get_keyfile_hex(config, config_home, "styling", "function_other", "0x690000", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[14]);
+	get_keyfile_hex(config, config_home, "styling", "coroutines", "0x66005c", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[15]);
+	get_keyfile_hex(config, config_home, "styling", "word5", "0x7979ff", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[16]);
+	get_keyfile_hex(config, config_home, "styling", "word6", "0xad00ff", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[17]);
+	get_keyfile_hex(config, config_home, "styling", "word7", "0x03D000", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[18]);
+	get_keyfile_hex(config, config_home, "styling", "word8", "0xff7600", "0xffffff", "false", &style_sets[GEANY_FILETYPES_LUA].styling[19]);
+
+	style_sets[GEANY_FILETYPES_LUA].keywords = g_new(gchar*, 9);
+	get_keyfile_keywords(config, config_home, "keywords", "keywords", GEANY_FILETYPES_LUA, 0,
+			"and break do else elseif end false for function if \
+			 in local nil not or repeat return then true until while");
+	get_keyfile_keywords(config, config_home, "keywords", "function_basic", GEANY_FILETYPES_LUA, 1,
+			"_VERSION assert collectgarbage dofile error gcinfo loadfile loadstring \
+			 print rawget rawset require tonumber tostring type unpack \
+			 _ALERT _ERRORMESSAGE _INPUT _PROMPT _OUTPUT \
+			 _STDERR _STDIN _STDOUT call dostring foreach foreachi getn globals newtype \
+			 sort tinsert tremove _G getfenv getmetatable ipairs loadlib next pairs pcall \
+			 rawequal setfenv setmetatable xpcall string table math coroutine io os debug \
+			 load module select");
+	get_keyfile_keywords(config, config_home, "keywords", "function_other", GEANY_FILETYPES_LUA, 2,
+			"abs acos asin atan atan2 ceil cos deg exp \
+			 floor format frexp gsub ldexp log log10 max min mod rad random randomseed \
+			 sin sqrt strbyte strchar strfind strlen strlower strrep strsub strupper tan \
+			 string.byte string.char string.dump string.find string.len \
+			 string.lower string.rep string.sub string.upper string.format string.gfind string.gsub \
+			 table.concat table.foreach table.foreachi table.getn table.sort table.insert table.remove table.setn \
+			 math.abs math.acos math.asin math.atan math.atan2 math.ceil math.cos math.deg math.exp \
+			 math.floor math.frexp math.ldexp math.log math.log10 math.max math.min math.mod \
+			 math.pi math.pow math.rad math.random math.randomseed math.sin math.sqrt math.tan \
+			 string.gmatch string.match string.reverse table.maxn \
+			 math.cosh math.fmod math.modf math.sinh math.tanh math.huge");
+	get_keyfile_keywords(config, config_home, "keywords", "coroutines", GEANY_FILETYPES_LUA, 3,
+			"openfile closefile readfrom writeto appendto remove rename flush seek tmpfile tmpname \
+			 read write clock date difftime execute exit getenv setlocale time coroutine.create \
+			 coroutine.resume coroutine.status coroutine.wrap coroutine.yield io.close io.flush \
+			 io.input io.lines io.open io.output io.read io.tmpfile io.type io.write io.stdin \
+			 io.stdout io.stderr os.clock os.date os.difftime os.execute os.exit os.getenv \
+			 os.remove os.rename os.setlocale os.time os.tmpname coroutine.running package.cpath \
+			 package.loaded package.loadlib package.path package.preload package.seeall io.popen");
+	get_keyfile_keywords(config, config_home, "keywords", "user1", GEANY_FILETYPES_LUA, 4, "");
+	get_keyfile_keywords(config, config_home, "keywords", "user2", GEANY_FILETYPES_LUA, 5, "");
+	get_keyfile_keywords(config, config_home, "keywords", "user3", GEANY_FILETYPES_LUA, 6, "");
+	get_keyfile_keywords(config, config_home, "keywords", "user4", GEANY_FILETYPES_LUA, 7, "");
+	style_sets[GEANY_FILETYPES_LUA].keywords[8] = NULL;
+
+	get_keyfile_wordchars(config, config_home,
+		&style_sets[GEANY_FILETYPES_LUA].wordchars);
+	filetypes_get_config(config, config_home, GEANY_FILETYPES_LUA);
+
+	g_key_file_free(config);
+	g_key_file_free(config_home);
+}
+
+
+void styleset_lua(ScintillaObject *sci)
+{
+	styleset_common(sci, 5);
+	if (style_sets[GEANY_FILETYPES_LUA].styling == NULL) styleset_lua_init();
+
+	SSM(sci, SCI_SETWORDCHARS, 0, (sptr_t) style_sets[GEANY_FILETYPES_LUA].wordchars);
+	SSM(sci, SCI_AUTOCSETMAXHEIGHT, app->autocompletion_max_height, 0);
+
+	SSM(sci, SCI_SETLEXER, SCLEX_LUA, 0);
+
+	SSM(sci, SCI_SETKEYWORDS, 0, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[0]);
+	SSM(sci, SCI_SETKEYWORDS, 1, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[1]);
+	SSM(sci, SCI_SETKEYWORDS, 2, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[2]);
+	SSM(sci, SCI_SETKEYWORDS, 3, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[3]);
+	SSM(sci, SCI_SETKEYWORDS, 4, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[4]);
+	SSM(sci, SCI_SETKEYWORDS, 5, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[5]);
+	SSM(sci, SCI_SETKEYWORDS, 6, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[6]);
+	SSM(sci, SCI_SETKEYWORDS, 7, (sptr_t) style_sets[GEANY_FILETYPES_LUA].keywords[7]);
+
+	set_sci_style(sci, STYLE_DEFAULT, GEANY_FILETYPES_LUA, 0);
+	set_sci_style(sci, SCE_LUA_DEFAULT, GEANY_FILETYPES_LUA, 0);
+	set_sci_style(sci, SCE_LUA_COMMENT, GEANY_FILETYPES_LUA, 1);
+	set_sci_style(sci, SCE_LUA_COMMENTLINE, GEANY_FILETYPES_LUA, 2);
+	set_sci_style(sci, SCE_LUA_COMMENTDOC, GEANY_FILETYPES_LUA, 3);
+	set_sci_style(sci, SCE_LUA_NUMBER, GEANY_FILETYPES_LUA, 4);
+	set_sci_style(sci, SCE_LUA_WORD, GEANY_FILETYPES_LUA, 5);
+	set_sci_style(sci, SCE_LUA_STRING, GEANY_FILETYPES_LUA, 6);
+	set_sci_style(sci, SCE_LUA_CHARACTER, GEANY_FILETYPES_LUA, 7);
+	set_sci_style(sci, SCE_LUA_LITERALSTRING, GEANY_FILETYPES_LUA, 8);
+	set_sci_style(sci, SCE_LUA_PREPROCESSOR, GEANY_FILETYPES_LUA, 9);
+	set_sci_style(sci, SCE_LUA_OPERATOR, GEANY_FILETYPES_LUA, 10);
+	set_sci_style(sci, SCE_LUA_IDENTIFIER, GEANY_FILETYPES_LUA, 11);
+	set_sci_style(sci, SCE_LUA_STRINGEOL, GEANY_FILETYPES_LUA, 12);
+	set_sci_style(sci, SCE_LUA_WORD2, GEANY_FILETYPES_LUA, 13);
+	set_sci_style(sci, SCE_LUA_WORD3, GEANY_FILETYPES_LUA, 14);
+	set_sci_style(sci, SCE_LUA_WORD4, GEANY_FILETYPES_LUA, 15);
+	set_sci_style(sci, SCE_LUA_WORD5, GEANY_FILETYPES_LUA, 16);
+	set_sci_style(sci, SCE_LUA_WORD6, GEANY_FILETYPES_LUA, 17);
+	set_sci_style(sci, SCE_LUA_WORD7, GEANY_FILETYPES_LUA, 18);
+	set_sci_style(sci, SCE_LUA_WORD8, GEANY_FILETYPES_LUA, 19);
+}

Modified: trunk/src/highlighting.h
===================================================================
--- trunk/src/highlighting.h	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/src/highlighting.h	2007-01-13 20:16:09 UTC (rev 1181)
@@ -1,7 +1,8 @@
 /*
  *      highlighting.h - this file is part of Geany, a fast and lightweight IDE
  *
- *      Copyright 2006 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2005-2007 Enrico Troeger <enrico.troeger at uvena.de>
+ *      Copyright 2006-2007 Nick Treleaven <nick.treleaven at btinternet.com>
  *
  *      This program is free software; you can redistribute it and/or modify
  *      it under the terms of the GNU General Public License as published by
@@ -90,4 +91,8 @@
 
 void styleset_vhdl(ScintillaObject *sci);
 
+void styleset_js(ScintillaObject *sci);
+
+void styleset_lua(ScintillaObject *sci);
+
 #endif

Modified: trunk/src/sci_cb.c
===================================================================
--- trunk/src/sci_cb.c	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/src/sci_cb.c	2007-01-13 20:16:09 UTC (rev 1181)
@@ -1906,6 +1906,17 @@
 				result = TRUE;
 			break;
 		}
+		case SCLEX_LUA:
+		{
+			if (style == SCE_LUA_COMMENT ||
+				style == SCE_LUA_COMMENTLINE ||
+				style == SCE_LUA_COMMENTDOC ||
+				style == SCE_LUA_LITERALSTRING ||
+				style == SCE_LUA_CHARACTER ||
+				style == SCE_LUA_STRING)
+				result = TRUE;
+			break;
+		}
 		case SCLEX_HTML:
 		{
 			if (style == SCE_HPHP_SIMPLESTRING ||
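
The new SCLEX_LUA case sets result to TRUE for the styles that mark comment and string regions of a Lua buffer. In Lua source those regions look like this (a sketch for illustration, not part of this commit):

	-- SCE_LUA_COMMENTLINE: a single-line comment
	--[[ SCE_LUA_COMMENT: a block comment ]]
	a = "SCE_LUA_STRING"
	b = 'SCE_LUA_CHARACTER'
	c = [[SCE_LUA_LITERALSTRING]]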

Modified: trunk/src/templates.c
===================================================================
--- trunk/src/templates.c	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/src/templates.c	2007-01-13 20:16:09 UTC (rev 1181)
@@ -360,6 +360,12 @@
 			break;
 		}
 
+		case GEANY_FILETYPES_JS:
+		{
+			line_prefix = "//";
+			break;
+		}
+
 		case GEANY_FILETYPES_LATEX:
 		{
 			line_prefix = "%";
@@ -367,6 +373,7 @@
 		}
 
 		case GEANY_FILETYPES_VHDL:
+		case GEANY_FILETYPES_LUA:
 		{
 			line_prefix = "--";
 			break;

Modified: trunk/tagmanager/Makefile.am
===================================================================
--- trunk/tagmanager/Makefile.am	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/tagmanager/Makefile.am	2007-01-13 20:16:09 UTC (rev 1181)
@@ -32,6 +32,8 @@
 	diff.c\
 	docbook.c\
 	fortran.c\
+	js.c\
+	lua.c\
 	make.c\
 	asm.c\
 	latex.c\

Added: trunk/tagmanager/js.c
===================================================================
--- trunk/tagmanager/js.c	                        (rev 0)
+++ trunk/tagmanager/js.c	2007-01-13 20:16:09 UTC (rev 1181)
@@ -0,0 +1,1247 @@
+/*
+ *	 Copyright (c) 2003, Darren Hiebert
+ *
+ *	 This source code is released for free distribution under the terms of the
+ *	 GNU General Public License.
+ *
+ *	 This module contains functions for generating tags for JavaScript language
+ *	 files.
+ *
+ *	 This is a good reference for different forms of the function statement:
+ *		 http://www.permadi.com/tutorial/jsFunc/
+ */
+
+/*
+ *	 INCLUDE FILES
+ */
+#include "general.h"	/* must always come first */
+#include <ctype.h>	/* to define isalpha () */
+#include <setjmp.h>
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+#include "main.h"
+#include "entry.h"
+#include "keyword.h"
+#include "parse.h"
+#include "read.h"
+#include "vstring.h"
+
+/*
+ *	 MACROS
+ */
+#define isType(token,t)		(boolean) ((token)->type == (t))
+#define isKeyword(token,k)	(boolean) ((token)->keyword == (k))
+
+/*
+ *	 DATA DECLARATIONS
+ */
+
+typedef enum eException { ExceptionNone, ExceptionEOF } exception_t;
+
+/*
+ * Tracks class and function names already created
+ */
+static stringList *ClassNames;
+static stringList *FunctionNames;
+
+/*	Used to specify type of keyword.
+*/
+typedef enum eKeywordId {
+	KEYWORD_NONE = -1,
+	KEYWORD_function,
+	KEYWORD_capital_function,
+	KEYWORD_prototype,
+	KEYWORD_var,
+	KEYWORD_new,
+	KEYWORD_this,
+	KEYWORD_for,
+	KEYWORD_while,
+	KEYWORD_do,
+	KEYWORD_if,
+	KEYWORD_else,
+	KEYWORD_switch
+} keywordId;
+
+/*	Used to determine whether keyword is valid for the token language and
+ *	what its ID is.
+ */
+typedef struct sKeywordDesc {
+	const char *name;
+	keywordId id;
+} keywordDesc;
+
+typedef enum eTokenType {
+	TOKEN_UNDEFINED,
+	TOKEN_CHARACTER,
+	TOKEN_CLOSE_PAREN,
+	TOKEN_SEMICOLON,
+	TOKEN_COLON,
+	TOKEN_COMMA,
+	TOKEN_KEYWORD,
+	TOKEN_OPEN_PAREN,
+	TOKEN_OPERATOR,
+	TOKEN_IDENTIFIER,
+	TOKEN_STRING,
+	TOKEN_PERIOD,
+	TOKEN_OPEN_CURLY,
+	TOKEN_CLOSE_CURLY,
+	TOKEN_EQUAL_SIGN,
+	TOKEN_FORWARD_SLASH
+} tokenType;
+
+typedef struct sTokenInfo {
+	tokenType	type;
+	keywordId	keyword;
+	vString *	string;
+	vString *	scope;
+	unsigned long lineNumber;
+	fpos_t filePosition;
+	int			nestLevel;
+} tokenInfo;
+
+/*
+ *	DATA DEFINITIONS
+ */
+
+static langType Lang_js;
+
+static jmp_buf Exception;
+
+typedef enum {
+	JSTAG_FUNCTION,
+	JSTAG_CLASS,
+	JSTAG_METHOD,
+	JSTAG_VARIABLE,
+	JSTAG_COUNT
+} jsKind;
+
+static kindOption JsKinds [] = {
+	{ TRUE,  'f', "function",	  "functions"			   },
+	{ TRUE,  'c', "class",		  "classes"			   },
+	{ TRUE,  'm', "method",		  "methods"			   },
+	{ TRUE,  'v', "variable",	  "global variables"	   }
+};
+
+static const keywordDesc JsKeywordTable [] = {
+	/* keyword		keyword ID */
+	{ "function",	KEYWORD_function			},
+	{ "Function",	KEYWORD_capital_function	},
+	{ "prototype",	KEYWORD_prototype			},
+	{ "var",		KEYWORD_var					},
+	{ "new",		KEYWORD_new					},
+	{ "this",		KEYWORD_this				},
+	{ "for",		KEYWORD_for					},
+	{ "while",		KEYWORD_while				},
+	{ "do",			KEYWORD_do					},
+	{ "if",			KEYWORD_if					},
+	{ "else",		KEYWORD_else				},
+	{ "switch",		KEYWORD_switch				}
+};
+
+/*
+ *	 FUNCTION DEFINITIONS
+ */
+
+/* Recursive functions */
+static void parseFunction (tokenInfo *const token);
+static boolean parseBlock (tokenInfo *const token, tokenInfo *const parent);
+static boolean parseLine (tokenInfo *const token, boolean is_inside_class);
+
+static boolean isIdentChar1 (const int c)
+{
+	/*
+	 * Other databases are less restrictive on the first character of
+	 * an identifier.
+	 * isIdentChar1 is used to identify the first character of an
+	 * identifier, so we are removing some restrictions.
+	 */
+	return (boolean)
+		(isalpha (c) || c == '@' || c == '_' );
+}
+
+static boolean isIdentChar (const int c)
+{
+	return (boolean)
+		(isalpha (c) || isdigit (c) || c == '$' ||
+		 c == '@' || c == '_' || c == '#');
+}
+
+static void buildJsKeywordHash (void)
+{
+	const size_t count = sizeof (JsKeywordTable) /
+		sizeof (JsKeywordTable [0]);
+	size_t i;
+	for (i = 0	;  i < count  ;  ++i)
+	{
+		const keywordDesc* const p = &JsKeywordTable [i];
+		addKeyword (p->name, Lang_js, (int) p->id);
+	}
+}
+
+static tokenInfo *newToken (void)
+{
+	tokenInfo *const token = xMalloc (1, tokenInfo);
+
+	token->type			= TOKEN_UNDEFINED;
+	token->keyword		= KEYWORD_NONE;
+	token->string		= vStringNew ();
+	token->scope		= vStringNew ();
+	token->nestLevel	= 0;
+
+	return token;
+}
+
+static void deleteToken (tokenInfo *const token)
+{
+	vStringDelete (token->string);
+	vStringDelete (token->scope);
+	eFree (token);
+}
+
+/*
+ *	 Tag generation functions
+ */
+
+static void makeConstTag (tokenInfo *const token, const jsKind kind)
+{
+	if (JsKinds [kind].enabled)
+	{
+		const char *const name = vStringValue (token->string);
+		tagEntryInfo e;
+		initTagEntry (&e, name);
+
+		e.lineNumber   = token->lineNumber;
+		e.filePosition = token->filePosition;
+		e.kindName	   = JsKinds [kind].name;
+		e.kind		   = JsKinds [kind].letter;
+
+		makeTagEntry (&e);
+	}
+}
+
+static void makeJsTag (tokenInfo *const token, const jsKind kind)
+{
+	vString *	fulltag;
+
+	if (JsKinds [kind].enabled)
+	{
+		/*
+		 * If a scope has been added to the token, change the token
+		 * string to include the scope when making the tag.
+		 */
+		if ( vStringLength(token->scope) > 0 )
+		{
+			fulltag = vStringNew ();
+			vStringCopy(fulltag, token->scope);
+			vStringCatS (fulltag, ".");
+			vStringCatS (fulltag, vStringValue(token->string));
+			vStringTerminate(fulltag);
+			vStringCopy(token->string, fulltag);
+			vStringDelete (fulltag);
+		}
+		makeConstTag (token, kind);
+	}
+}
+
+static void makeClassTag (tokenInfo *const token)
+{
+	if ( ! stringListHas(ClassNames, vStringValue (token->string)) )
+	{
+		stringListAdd (ClassNames, vStringNewCopy (token->string));
+		makeJsTag (token, JSTAG_CLASS);
+	}
+}
+
+static void makeFunctionTag (tokenInfo *const token)
+{
+	if ( ! stringListHas(FunctionNames, vStringValue (token->string)) )
+	{
+		stringListAdd (FunctionNames, vStringNewCopy (token->string));
+		makeJsTag (token, JSTAG_FUNCTION);
+	}
+}
+
+/*
+ *	 Parsing functions
+ */
+
+static int skipToCharacter (const int c)
+{
+	int d;
+	do
+	{
+		d = fileGetc ();
+	} while (d != EOF  &&  d != c);
+	return d;
+}
+
+static void parseString (vString *const string, const int delimiter)
+{
+	boolean end = FALSE;
+	int c;
+	while (! end)
+	{
+		c = fileGetc ();
+		if (c == EOF)
+			end = TRUE;
+		else if (c == delimiter)
+			end = TRUE;
+		else
+			vStringPut (string, c);
+	}
+	vStringTerminate (string);
+}
+
+/*	Reads a C identifier beginning with "firstChar" and places it into
+ *	"name".
+ */
+static void parseIdentifier (vString *const string, const int firstChar)
+{
+	int c = firstChar;
+	Assert (isIdentChar1 (c));
+	do
+	{
+		vStringPut (string, c);
+		c = fileGetc ();
+	} while (isIdentChar (c));
+	vStringTerminate (string);
+	if (!isspace (c))
+		fileUngetc (c);		/* unget non-identifier character */
+}
+
+static keywordId analyzeToken (vString *const name)
+{
+	static vString *keyword = NULL;
+	if (keyword == NULL)
+		keyword = vStringNew ();
+	vStringCopyToLower (keyword, name);
+	return (keywordId) lookupKeyword (vStringValue (keyword), Lang_js);
+}
+
+static void readToken (tokenInfo *const token)
+{
+	int c;
+
+	token->type			= TOKEN_UNDEFINED;
+	token->keyword		= KEYWORD_NONE;
+	vStringClear (token->string);
+
+getNextChar:
+	do
+	{
+		c = fileGetc ();
+		/*
+		 * Added " to the list of ignores, not sure what this
+		 * might break but it gets by this issue:
+		 *	  create table "t1" (...)
+		 */
+	}
+	while (c == '\t'  ||  c == ' ' ||  c == '\n');
+
+	switch (c)
+	{
+		case EOF: longjmp (Exception, (int)ExceptionEOF);	break;
+		case '(': token->type = TOKEN_OPEN_PAREN;		break;
+		case ')': token->type = TOKEN_CLOSE_PAREN;		break;
+		case ';': token->type = TOKEN_SEMICOLON;		break;
+		case ',': token->type = TOKEN_COMMA;			break;
+		case '.': token->type = TOKEN_PERIOD;				break;
+		case ':': token->type = TOKEN_COLON;			break;
+		case '{': token->type = TOKEN_OPEN_CURLY;		break;
+		case '}': token->type = TOKEN_CLOSE_CURLY;		break;
+		case '=': token->type = TOKEN_EQUAL_SIGN;			break;
+
+		case '\'':
+		case '"':
+				  token->type = TOKEN_STRING;
+				  parseString (token->string, c);
+				  token->lineNumber = getSourceLineNumber ();
+				  token->filePosition = getInputFilePosition ();
+				  break;
+
+		case '/':
+				  {
+					  int d = fileGetc ();
+					  if ( (d != '*') &&		/* is this the start of a comment? */
+							  (d != '/') )		/* is a one line comment? */
+					  {
+						  token->type = TOKEN_FORWARD_SLASH;
+						  fileUngetc (d);
+					  }
+					  else
+					  {
+						  if (d == '*')
+						  {
+							  do
+							  {
+								  skipToCharacter ('*');
+								  c = fileGetc ();
+								  if (c == '/')
+									  break;
+								  else
+									  fileUngetc (c);
+							  } while (c != EOF && c != '\0');
+							  goto getNextChar;
+						  }
+						  else if (d == '/')	/* is this the start of a comment?  */
+						  {
+							  skipToCharacter ('\n');
+							  goto getNextChar;
+						  }
+					  }
+					  break;
+				  }
+
+		default:
+				  if (! isIdentChar1 (c))
+					  token->type = TOKEN_UNDEFINED;
+				  else
+				  {
+					  parseIdentifier (token->string, c);
+					  token->lineNumber = getSourceLineNumber ();
+					  token->filePosition = getInputFilePosition ();
+					  token->keyword = analyzeToken (token->string);
+					  if (isKeyword (token, KEYWORD_NONE))
+						  token->type = TOKEN_IDENTIFIER;
+					  else
+						  token->type = TOKEN_KEYWORD;
+				  }
+				  break;
+	}
+}
+
+static void copyToken (tokenInfo *const dest, tokenInfo *const src)
+{
+	dest->nestLevel = src->nestLevel;
+	dest->lineNumber = src->lineNumber;
+	dest->filePosition = src->filePosition;
+	dest->type = src->type;
+	dest->keyword = src->keyword;
+	vStringCopy(dest->string, src->string);
+	vStringCopy(dest->scope, src->scope);
+}
+
+/*
+ *	 Token parsing functions
+ */
+
+static void skipArgumentList (tokenInfo *const token)
+{
+	int nest_level = 0;
+
+	/*
+	 * Other databases can have arguments with fully declared
+	 * datatypes:
+	 *	 (	name varchar(30), text binary(10)  )
+	 * So we must check for nested open and closing parantheses
+	 */
+
+	if (isType (token, TOKEN_OPEN_PAREN))	/* arguments? */
+	{
+		nest_level++;
+		while (! (isType (token, TOKEN_CLOSE_PAREN) && (nest_level == 0)))
+		{
+			readToken (token);
+			if (isType (token, TOKEN_OPEN_PAREN))
+			{
+				nest_level++;
+			}
+			if (isType (token, TOKEN_CLOSE_PAREN))
+			{
+				if (nest_level > 0)
+				{
+					nest_level--;
+				}
+			}
+		}
+		readToken (token);
+	}
+}
+
+static void addContext (tokenInfo* const parent, const tokenInfo* const child)
+{
+	if (vStringLength (parent->string) > 0)
+	{
+		vStringCatS (parent->string, ".");
+	}
+	vStringCatS (parent->string, vStringValue(child->string));
+	vStringTerminate(parent->string);
+}
+
+static void addToScope (tokenInfo* const token, vString* const extra)
+{
+	if (vStringLength (token->scope) > 0)
+	{
+		vStringCatS (token->scope, ".");
+	}
+	vStringCatS (token->scope, vStringValue(extra));
+	vStringTerminate(token->scope);
+}
+
+/*
+ *	 Scanning functions
+ */
+
+static void findCmdTerm (tokenInfo *const token)
+{
+	/*
+	 * Read until we find either a semicolon or closing brace.
+	 * Any nested braces will be handled within.
+	 */
+	while (! ( isType (token, TOKEN_SEMICOLON) ||
+				isType (token, TOKEN_CLOSE_CURLY) ) )
+	{
+		/* Handle nested blocks */
+		if ( isType (token, TOKEN_OPEN_CURLY))
+		{
+			parseBlock (token, token);
+		}
+		else
+		{
+			readToken (token);
+		}
+	}
+}
+
+static void parseSwitch (tokenInfo *const token)
+{
+	/*
+	 * switch (expression){
+	 * case value1:
+	 *	   statement;
+	 *	   break;
+	 * case value2:
+	 *	   statement;
+	 *	   break;
+	 * default : statement;
+	 * }
+	 */
+
+	readToken (token);
+
+	if (isType (token, TOKEN_OPEN_PAREN))
+	{
+		/*
+		 * Handle nameless functions, these will only
+		 * be considered methods.
+		 */
+		skipArgumentList(token);
+	}
+
+	if (isType (token, TOKEN_OPEN_CURLY))
+	{
+		/*
+		 * This will be either a function or a class.
+		 * We can only determine this by checking the body
+		 * of the function.  If we find a "this." we know
+		 * it is a class, otherwise it is a function.
+		 */
+		parseBlock (token, token);
+	}
+
+}
+
+static void parseLoop (tokenInfo *const token)
+{
+	/*
+	 * Handles these statements
+	 *	   for (x=0; x<3; x++)
+	 *		   document.write("This text is repeated three times<br>");
+	 *
+	 *	   for (x=0; x<3; x++)
+	 *	   {
+	 *		   document.write("This text is repeated three times<br>");
+	 *	   }
+	 *
+	 *	   while (number<5){
+	 *		   document.write(number+"<br>");
+	 *		   number++;
+	 *	   }
+	 *
+	 *	   do{
+	 *		   document.write(number+"<br>");
+	 *		   number++;
+	 *	   }
+	 *	   while (number<5);
+	 */
+
+	if (isKeyword (token, KEYWORD_for) || isKeyword (token, KEYWORD_while))
+	{
+		readToken(token);
+
+		if (isType (token, TOKEN_OPEN_PAREN))
+		{
+			/*
+			 * Handle nameless functions, these will only
+			 * be considered methods.
+			 */
+			skipArgumentList(token);
+		}
+
+		if (isType (token, TOKEN_OPEN_CURLY))
+		{
+			/*
+			 * This will be either a function or a class.
+			 * We can only determine this by checking the body
+			 * of the function.  If we find a "this." we know
+			 * it is a class, otherwise it is a function.
+			 */
+			parseBlock (token, token);
+		}
+		else
+		{
+			parseLine(token, FALSE);
+		}
+	}
+	else if (isKeyword (token, KEYWORD_do))
+	{
+		readToken(token);
+
+		if (isType (token, TOKEN_OPEN_CURLY))
+		{
+			/*
+			 * This will be either a function or a class.
+			 * We can only determine this by checking the body
+			 * of the function.  If we find a "this." we know
+			 * it is a class, otherwise it is a function.
+			 */
+			parseBlock (token, token);
+		}
+		else
+		{
+			parseLine(token, FALSE);
+		}
+
+		readToken(token);
+
+		if (isKeyword (token, KEYWORD_while))
+		{
+			readToken(token);
+
+			if (isType (token, TOKEN_OPEN_PAREN))
+			{
+				/*
+				 * Handle nameless functions, these will only
+				 * be considered methods.
+				 */
+				skipArgumentList(token);
+			}
+		}
+	}
+}
+
+static void parseIf (tokenInfo *const token)
+{
+	/*
+	 * If statements have two forms
+	 *	   if ( ... )
+	 *		   one line;
+	 *
+	 *	   if ( ... ) {
+	 *		  multiple;
+	 *		  statements;
+	 *	   }
+	 *
+	 *	   if ( ... ) {
+	 *		  return elem
+	 *	   }
+	 *
+	 * TODO:  Deal with statements that can optionally end
+	 *		  without a semi-colon.  Currently this messes up
+	 *		  the parsing of blocks.
+	 *		  Need to somehow detect this has happened, and either
+	 *		  backup a token, or skip reading the next token if
+	 *		  that is possible from all code locations.
+	 *
+	 */
+
+	readToken (token);
+
+	if (isType (token, TOKEN_OPEN_PAREN))
+	{
+		/*
+		 * Handle nameless functions, these will only
+		 * be considered methods.
+		 */
+		skipArgumentList(token);
+	}
+
+	if (isType (token, TOKEN_OPEN_CURLY))
+	{
+		/*
+		 * This will be either a function or a class.
+		 * We can only determine this by checking the body
+		 * of the function.  If we find a "this." we know
+		 * it is a class, otherwise it is a function.
+		 */
+		parseBlock (token, token);
+	}
+	else
+	{
+		findCmdTerm (token);
+	}
+}
+
+static void parseFunction (tokenInfo *const token)
+{
+	tokenInfo *const name = newToken ();
+	boolean is_class = FALSE;
+
+	/*
+	 * This deals with these formats
+	 *	   function validFunctionTwo(a,b) {}
+	 */
+
+	readToken (name);
+	/* Add scope in case this is an INNER function */
+	addToScope(name, token->scope);
+
+	readToken (token);
+	if (isType (token, TOKEN_PERIOD))
+	{
+		do
+		{
+			readToken (token);
+			if ( isKeyword(token, KEYWORD_NONE) )
+			{
+				addContext (name, token);
+				readToken (token);
+			}
+		} while (isType (token, TOKEN_PERIOD));
+	}
+
+	if ( isType (token, TOKEN_OPEN_PAREN) )
+		skipArgumentList(token);
+
+	if ( isType (token, TOKEN_OPEN_CURLY) )
+	{
+		is_class = parseBlock (token, name);
+		if ( is_class )
+			makeClassTag (name);
+		else
+			makeFunctionTag (name);
+	}
+
+	findCmdTerm (token);
+
+	deleteToken (name);
+}
+
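+/*
+ * Parse a curly-brace block.  Returns TRUE if the block body references
+ * "this", in which case the callers tag the enclosing construct as a
+ * class rather than a plain function.
+ */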
+static boolean parseBlock (tokenInfo *const token, tokenInfo *const parent)
+{
+	boolean is_class = FALSE;
+	boolean read_next_token = TRUE;
+	vString * saveScope = vStringNew ();
+
+	token->nestLevel++;
+	/*
+	 * Make this routine a bit more forgiving.
+	 * If called on an open_curly advance it
+	 */
+	if ( isType (token, TOKEN_OPEN_CURLY) &&
+			isKeyword(token, KEYWORD_NONE) )
+		readToken(token);
+
+	if (! isType (token, TOKEN_CLOSE_CURLY))
+	{
+		/*
+		 * Read until we find the closing brace,
+		 * any nested braces will be handled within
+		 */
+		do
+		{
+			read_next_token = TRUE;
+			if (isKeyword (token, KEYWORD_this))
+			{
+				/*
+				 * Then we are inside a class and we have found
+				 * a class, not a function
+				 */
+				is_class = TRUE;
+				vStringCopy(saveScope, token->scope);
+				addToScope (token, parent->string);
+				/* Move past this */
+				readToken(token);
+
+				/* Move past a potential . */
+				if ( isType (token, TOKEN_PERIOD) )
+					readToken(token);
+
+				parseLine (token, is_class);
+				vStringCopy(token->scope, saveScope);
+			}
+			else if (isKeyword (token, KEYWORD_var))
+			{
+				/*
+				 * Potentially we have found an inner function.
+				 * Set something to indicate the scope
+				 */
+				vStringCopy(saveScope, token->scope);
+				addToScope (token, parent->string);
+				parseLine (token, is_class);
+				vStringCopy(token->scope, saveScope);
+			}
+			else if (isKeyword (token, KEYWORD_function))
+			{
+				vStringCopy(saveScope, token->scope);
+				addToScope (token, parent->string);
+				parseFunction (token);
+				vStringCopy(token->scope, saveScope);
+			}
+			else if (isType (token, TOKEN_OPEN_CURLY))
+			{
+				/* Handle nested blocks */
+				parseBlock (token, parent);
+			}
+			else
+			{
+				/*
+				 * It is possible for a line to have no terminator
+				 * if the following line is a closing brace.
+				 * parseLine will detect this case and indicate
+				 * whether we should read an additional token.
+				 */
+				read_next_token = parseLine (token, is_class);
+			}
+
+			/*
+			 * Always read a new token unless we find a statement without
+			 * an ending terminator
+			 */
+			if( read_next_token )
+				readToken(token);
+
+			/*
+			 * If we find a statement without a terminator consider the
+			 * block finished, otherwise the stack will be off by one.
+			 */
+		} while (! isType (token, TOKEN_CLOSE_CURLY) && read_next_token );
+	}
+
+	vStringDelete(saveScope);
+	token->nestLevel--;
+
+	return is_class;
+}
+
+static void parseMethods (tokenInfo *const token, tokenInfo *const class)
+{
+	tokenInfo *const name = newToken ();
+
+	/*
+	 * This deals with these formats
+	 *	   'validMethod' : function(a,b) {}
+	 */
+
+	do
+	{
+		readToken (token);
+		if (isType (token, TOKEN_STRING))
+		{
+			copyToken(name, token);
+
+			readToken (token);
+			if ( isType (token, TOKEN_COLON) )
+			{
+				readToken (token);
+				if ( isKeyword (token, KEYWORD_function) )
+				{
+					readToken (token);
+					if ( isType (token, TOKEN_OPEN_PAREN) )
+					{
+						skipArgumentList(token);
+					}
+
+					if (isType (token, TOKEN_OPEN_CURLY))
+					{
+						addToScope (name, class->string);
+						makeJsTag (name, JSTAG_METHOD);
+						parseBlock (token, name);
+
+						/*
+						 * Read to the closing curly, check next
+						 * token, if comma, we must loop again
+						 */
+						readToken (token);
+					}
+				}
+			}
+		}
+	} while ( isType(token, TOKEN_COMMA) );
+
+	findCmdTerm (token);
+
+	deleteToken (name);
+}
+
+static boolean parseStatement (tokenInfo *const token, boolean is_inside_class)
+{
+	tokenInfo *const name = newToken ();
+	tokenInfo *const secondary_name = newToken ();
+	vString * saveScope = vStringNew ();
+	boolean is_class = FALSE;
+	boolean is_terminated = TRUE;
+	boolean is_global = FALSE;
+
+	vStringClear(saveScope);
+	/*
+	 * Functions can be named or unnamed.
+	 * This deals with these formats:
+	 * Function
+	 *	   validFunctionOne = function(a,b) {}
+	 *	   testlib.validFunctionFive = function(a,b) {}
+	 *	   var innerThree = function(a,b) {}
+	 *	   var innerFour = (a,b) {}
+	 *	   var D2 = secondary_fcn_name(a,b) {}
+	 *	   var D3 = new Function("a", "b", "return a+b;");
+	 * Class
+	 *	   testlib.extras.ValidClassOne = function(a,b) {
+	 *		   this.a = a;
+	 *	   }
+	 * Class Methods
+	 *	   testlib.extras.ValidClassOne.prototype = {
+	 *		   'validMethodOne' : function(a,b) {},
+	 *		   'validMethodTwo' : function(a,b) {}
+	 *	   }
+	 *	   Database.prototype.getTodaysDate = Database_getTodaysDate;
+	 */
+
+	if ( is_inside_class )
+		is_class = TRUE;
+	/*
+	 * var can precede an inner function
+	 */
+	if ( isKeyword(token, KEYWORD_var) )
+	{
+		/*
+		 * Only create variables for global scope
+		 */
+		if ( token->nestLevel == 0 )
+		{
+			is_global = TRUE;
+		}
+		readToken(token);
+	}
+
+	copyToken(name, token);
+
+	/* Potentially the name of the function */
+	readToken (token);
+	if (isType (token, TOKEN_PERIOD))
+	{
+		/*
+		 * Cannot be a global variable if it has dot references in the name
+		 */
+		is_global = FALSE;
+		do
+		{
+			readToken (token);
+			if ( isKeyword(token, KEYWORD_NONE) )
+			{
+				if ( is_class )
+				{
+					vStringCopy(saveScope, token->scope);
+					addToScope(token, name->string);
+					makeJsTag (token, JSTAG_METHOD);
+
+					/* Find to the end of the statement */
+					findCmdTerm (token);
+					goto cleanUp;
+				}
+				else
+					addContext (name, token);
+			}
+			else if ( isKeyword(token, KEYWORD_prototype) )
+			{
+				makeClassTag (name);
+				is_class = TRUE;
+			}
+			readToken (token);
+		} while (isType (token, TOKEN_PERIOD));
+	}
+
+	if ( isType (token, TOKEN_EQUAL_SIGN) )
+	{
+		readToken (token);
+
+		if ( isKeyword (token, KEYWORD_function) )
+		{
+			readToken (token);
+
+			if ( isKeyword (token, KEYWORD_NONE) &&
+					! isType (token, TOKEN_OPEN_PAREN) )
+			{
+				/*
+				 * Functions of this format:
+				 *	   var D2A=function theAdd(a, b)
+				 *	   {
+				 *		  return a+b;
+				 *	   }
+				 * Are really two separately defined functions and
+				 * can be referenced in two ways:
+				 *	   alert(D2A(1,2));			  // produces 3
+				 *	   alert(theAdd(1,2));		  // also produces 3
+				 * So it must have two tags:
+				 *	   D2A
+				 *	   theAdd
+				 * Save the reference to the name for later use, once
+				 * we have established this is a valid function we will
+				 * create the secondary reference to it.
+				 */
+				copyToken(secondary_name, token);
+				readToken (token);
+			}
+
+			if ( isType (token, TOKEN_OPEN_PAREN) )
+				skipArgumentList(token);
+
+			if (isType (token, TOKEN_OPEN_CURLY))
+			{
+				/*
+				 * This will be either a function or a class.
+				 * We can only determine this by checking the body
+				 * of the function.  If we find a "this." we know
+				 * it is a class, otherwise it is a function.
+				 */
+				if ( is_inside_class )
+				{
+					makeJsTag (name, JSTAG_METHOD);
+					if ( vStringLength(secondary_name->string) > 0 )
+						makeFunctionTag (secondary_name);
+					parseBlock (token, name);
+				}
+				else
+				{
+					is_class = parseBlock (token, name);
+					if ( is_class )
+						makeClassTag (name);
+					else
+						makeFunctionTag (name);
+
+					if ( vStringLength(secondary_name->string) > 0 )
+						makeFunctionTag (secondary_name);
+				}
+			}
+		}
+		else if (isType (token, TOKEN_OPEN_PAREN))
+		{
+			/*
+			 * Handle nameless functions, these will only
+			 * be considered methods.
+			 */
+			skipArgumentList(token);
+
+			if (isType (token, TOKEN_OPEN_CURLY))
+			{
+				/*
+				 * A nameless function assigned to `name': tag it
+				 * as a method and parse its body for nested tags.
+				 */
+				makeJsTag (name, JSTAG_METHOD);
+				parseBlock (token, name);
+			}
+		}
+		else if (isType (token, TOKEN_OPEN_CURLY))
+		{
+			parseMethods(token, name);
+		}
+		else if (isKeyword (token, KEYWORD_new))
+		{
+			readToken (token);
+			if ( isKeyword (token, KEYWORD_function) ||
+					isKeyword (token, KEYWORD_capital_function) )
+			{
+				readToken (token);
+				if ( isType (token, TOKEN_OPEN_PAREN) )
+					skipArgumentList(token);
+
+				if (isType (token, TOKEN_SEMICOLON))
+					makeFunctionTag (name);
+			}
+		}
+		else if (isKeyword (token, KEYWORD_NONE))
+		{
+			/*
+			 * Only create variables for global scope
+			 */
+			if ( token->nestLevel == 0 && is_global )
+			{
+				/*
+				 * A pointer can be created to the function.
+				 * If we recognize the function/class name ignore the variable.
+				 * This format looks identical to a variable definition.
+				 * A variable defined outside of a block is considered
+				 * a global variable:
+				 *	   var g_var1 = 1;
+				 *	   var g_var2;
+				 * This is not a global variable:
+				 *	   var g_var = function;
+				 * This is a global variable:
+				 *	   var g_var = different_var_name;
+				 */
+				if ( ! stringListHas(FunctionNames, vStringValue (token->string)) &&
+						! stringListHas(ClassNames, vStringValue (token->string)) )
+				{
+					readToken (token);
+					if (isType (token, TOKEN_SEMICOLON))
+						makeJsTag (name, JSTAG_VARIABLE);
+				}
+			}
+		}
+	}
+	else
+	{
+		/*
+		 * Only create variables for global scope
+		 */
+		if ( token->nestLevel == 0 && is_global )
+		{
+			/*
+			 * Handles this syntax:
+			 *	   var g_var2;
+			 */
+			if (isType (token, TOKEN_SEMICOLON))
+				makeJsTag (name, JSTAG_VARIABLE);
+		}
+	}
+	findCmdTerm (token);
+
+	/*
+	 * The terminating semi-colon is optional for a statement that
+	 * comes immediately before a closing curly brace, as in the
+	 * document.write line below:
+	 *
+	 * function checkForUpdate() {
+	 *	   if( 1==1 ) {
+	 *		   document.write("hello from checkForUpdate<br>")
+	 *	   }
+	 *	   return 1;
+	 * }
+	 */
+	if (isType (token, TOKEN_CLOSE_CURLY))
+		is_terminated = FALSE;
+
+
+cleanUp:
+	vStringCopy(token->scope, saveScope);
+	deleteToken (name);
+	deleteToken (secondary_name);
+	vStringDelete(saveScope);
+
+	return is_terminated;
+}
+
+static boolean parseLine (tokenInfo *const token, boolean is_inside_class)
+{
+	boolean is_terminated = TRUE;
+	/*
+	 * Detect the common statements, if, while, for, do, ...
+	 * This is necessary since the last statement within a block "{}"
+	 * can be optionally terminated.
+	 *
+	 * If the statement is not terminated, we need to tell
+	 * the calling routine to prevent reading an additional token
+	 * looking for the end of the statement.
+	 */
+
+	if (isType(token, TOKEN_KEYWORD))
+	{
+		switch (token->keyword)
+		{
+			case KEYWORD_for:
+			case KEYWORD_while:
+			case KEYWORD_do:
+				parseLoop (token);
+				break;
+			case KEYWORD_if:
+			case KEYWORD_else:
+				parseIf (token);
+				break;
+			case KEYWORD_switch:
+				parseSwitch (token);
+				break;
+			default:
+				parseStatement (token, is_inside_class);
+				break;
+		}
+	}
+	else
+	{
+		/*
+		 * Special case where single line statements may not be
+		 * SEMICOLON terminated.  parseBlock needs to know this
+		 * so that it does not read the next token.
+		 */
+		is_terminated = parseStatement (token, is_inside_class);
+	}
+	return is_terminated;
+}
+
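+/*
+ * Top-level parse loop.  It does not return normally; end of input is
+ * signalled through the Exception jump buffer set up in findJsTags()
+ * below, which is what eventually breaks out of this loop.
+ */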
+static void parseJsFile (tokenInfo *const token)
+{
+	do
+	{
+		readToken (token);
+
+		if (isType(token, TOKEN_KEYWORD))
+		{
+			switch (token->keyword)
+			{
+				case KEYWORD_function:	parseFunction (token); break;
+				default:				parseLine (token, FALSE); break;
+			}
+		}
+		else
+		{
+			parseLine (token, FALSE);
+		}
+	} while (TRUE);
+}
+
+static void initialize (const langType language)
+{
+	Assert (sizeof (JsKinds) / sizeof (JsKinds [0]) == JSTAG_COUNT);
+	Lang_js = language;
+	buildJsKeywordHash ();
+}
+
+static void findJsTags (void)
+{
+	tokenInfo *const token = newToken ();
+	exception_t exception = (exception_t) (setjmp (Exception));
+	ClassNames = stringListNew ();
+	FunctionNames = stringListNew ();
+
+	while (exception == ExceptionNone)
+		parseJsFile (token);
+
+	stringListDelete (ClassNames);
+	stringListDelete (FunctionNames);
+	ClassNames = NULL;
+	FunctionNames = NULL;
+	deleteToken (token);
+}
+
+/* Create parser definition structure */
+extern parserDefinition* JavaScriptParser (void)
+{
+	static const char *const extensions [] = { "js", NULL };
+
+	parserDefinition *const def = parserNew ("Javascript");
+	def->extensions = extensions;
+	/*
+	 * New definitions for parsing instead of regex
+	 */
+	def->kinds		= JsKinds;
+	def->kindCount	= KIND_COUNT (JsKinds);
+	def->parser		= findJsTags;
+	def->initialize = initialize;
+
+	return def;
+}
+
+/* vi:set tabstop=4 shiftwidth=4 noexpandtab: */

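For readers following the control flow: findJsTags() above loops on a setjmp() of Exception, while parseJsFile() itself never returns normally, so the only way out is a longjmp() back to that setjmp() (presumably raised by the token reader once the input is exhausted). A stripped-down, self-contained illustration of that idiom; the helper names and the token counter are invented for the example:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf Exception;
static int remaining = 3;			/* stand-in for "tokens left in the file" */

static void read_next (void)
{
	if (remaining-- == 0)
		longjmp (Exception, 1);		/* "end of file": jump back to setjmp() */
	printf ("read a token\n");
}

static void parse_forever (void)
{
	do
		read_next ();
	while (1);						/* never returns normally */
}

int main (void)
{
	if (setjmp (Exception) == 0)	/* 0 on the first pass, 1 after longjmp() */
		parse_forever ();
	printf ("end of input reached\n");
	return 0;
}
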
Added: trunk/tagmanager/lua.c
===================================================================
--- trunk/tagmanager/lua.c	                        (rev 0)
+++ trunk/tagmanager/lua.c	2007-01-13 20:16:09 UTC (rev 1181)
@@ -0,0 +1,132 @@
+/*
+*   $Id$
+*
+*   Copyright (c) 2000-2001, Max Ischenko <mfi at ukr.net>.
+*
+*   This source code is released for free distribution under the terms of the
+*   GNU General Public License.
+*
+*   This module contains functions for generating tags for the Lua language.
+*/
+
+/*
+*   INCLUDE FILES
+*/
+#include "general.h"  /* must always come first */
+
+#include <string.h>
+
+#include "parse.h"
+#include "read.h"
+#include "vstring.h"
+
+/*
+*   DATA DEFINITIONS
+*/
+typedef enum {
+	K_FUNCTION
+} luaKind;
+
+static kindOption LuaKinds [] = {
+	{ TRUE, 'f', "function", "functions" }
+};
+
+/*
+*   FUNCTION DEFINITIONS
+*/
+
+/* for debugging purposes */
+static void __unused__ print_string (char *p, char *q)
+{
+	for ( ; p != q; p++)
+		fprintf (errout, "%c", *p);
+	fprintf (errout, "\n");
+}
+
+/*
+ * Helper function.
+ * Returns TRUE if the line looks like a line of Lua code.
+ *
+ * TODO: Recognize UNIX bang notation.
+ * (Lua treats the first line as a comment if it starts with #!)
+ *
+ */
+static boolean is_a_code_line (const unsigned char *line)
+{
+	boolean result;
+	const unsigned char *p = line;
+	while (isspace ((int) *p))
+		p++;
+	if (p [0] == '\0')
+		result = FALSE;
+	else if (p [0] == '-' && p [1] == '-')
+		result = FALSE;
+	else
+		result = TRUE;
+	return result;
+}
+
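+/*
+ * Copy the identifier between `begin' and `end' (skipping leading
+ * whitespace) into `name' and emit it as a function tag.
+ */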
+static void extract_name (const char *begin, const char *end, vString *name)
+{
+	if (begin != NULL  &&  end != NULL  &&  begin < end)
+	{
+		const char *cp;
+
+		while (isspace ((int) *begin))
+			begin++;
+		while (isspace ((int) *end))
+			end--;
+		if (begin < end)
+		{
+			for (cp = begin ; cp != end; cp++)
+				vStringPut (name, (int) *cp);
+			vStringTerminate (name);
+
+			makeSimpleTag (name, LuaKinds, K_FUNCTION);
+			vStringClear (name);
+		}
+	}
+}
+
+static void findLuaTags (void)
+{
+	vString *name = vStringNew ();
+	const unsigned char *line;
+
+	while ((line = fileReadLine ()) != NULL)
+	{
+		const char *p, *q;
+
+		if (! is_a_code_line (line))
+			continue;
+
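+		/*
+		 * Two definition forms are recognised:
+		 *     function foo (...)     -- name follows the keyword
+		 *     foo = function (...)   -- name precedes the '='
+		 */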
+		p = (const char*) strstr ((const char*) line, "function");
+		if (p == NULL)
+			continue;
+
+		q = strchr ((const char*) line, '=');
+
+		if (q == NULL) {
+			p = p + 9;  /* skip the `function' keyword and the space after it */
+			q = strchr ((const char*) p, '(');
+			extract_name (p, q, name);
+		} else {
+			p = (const char*) &line[0];
+			extract_name (p, q, name);
+		}
+	}
+	vStringDelete (name);
+}
+
+extern parserDefinition* LuaParser (void)
+{
+	static const char* const extensions [] = { "lua", NULL };
+	parserDefinition* def = parserNew ("Lua");
+	def->kinds      = LuaKinds;
+	def->kindCount  = KIND_COUNT (LuaKinds);
+	def->extensions = extensions;
+	def->parser     = findLuaTags;
+	return def;
+}
+
+/* vi:set tabstop=4 shiftwidth=4: */

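To make the heuristic in findLuaTags() concrete, here is a small stand-alone program mirroring the same strstr()/strchr() pointer arithmetic on the two recognised line shapes; extract() and the sample lines are invented for the illustration and do not appear in lua.c:

#include <stdio.h>
#include <string.h>

/* Mirrors only the begin/end selection of findLuaTags(), not the tag
 * emission done by extract_name(). */
static void extract (const char *line)
{
	const char *p = strstr (line, "function");
	const char *q = strchr (line, '=');
	const char *begin, *end;

	if (p == NULL)
		return;
	if (q == NULL)
	{
		begin = p + 9;				/* past "function" and the space after it */
		end = strchr (begin, '(');
	}
	else
	{
		begin = line;
		end = q;
	}
	if (end == NULL || begin >= end)
		return;
	printf ("name: \"%.*s\"\n", (int) (end - begin), begin);
}

int main (void)
{
	extract ("function add (a, b)");		/* prints: name: "add " */
	extract ("mul = function (a, b)");		/* prints: name: "mul " */
	return 0;
}

In the real parser, extract_name() additionally skips leading whitespace before copying the selected range into the tag name.
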
Modified: trunk/tagmanager/makefile.win32
===================================================================
--- trunk/tagmanager/makefile.win32	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/tagmanager/makefile.win32	2007-01-13 20:16:09 UTC (rev 1181)
@@ -32,7 +32,7 @@
 clean:
 	-$(RM) deps.mak *.o $(COMPLIB)
 
-$(COMPLIB): args.o c.o fortran.o make.o conf.o pascal.o perl.o php.o diff.o vhdl.o \
+$(COMPLIB): args.o c.o fortran.o make.o conf.o pascal.o perl.o php.o diff.o vhdl.o lua.o js.o \
 python.o regex.o sh.o ctags.o entry.o get.o keyword.o options.o parse.o \
 read.o sort.o strlist.o latex.o docbook.o tcl.o ruby.o asm.o sql.o css.o vstring.o tm_workspace.o tm_work_object.o \
 tm_source_file.o tm_project.o tm_tag.o tm_symbol.o tm_file_entry.o \

Modified: trunk/tagmanager/parsers.h
===================================================================
--- trunk/tagmanager/parsers.h	2007-01-13 15:25:45 UTC (rev 1180)
+++ trunk/tagmanager/parsers.h	2007-01-13 20:16:09 UTC (rev 1181)
@@ -36,7 +36,9 @@
     FortranParser, \
     FeriteParser, \
     DiffParser, \
-    VhdlParser
+    VhdlParser, \
+    LuaParser, \
+    JavaScriptParser
 
 /*
 langType of each parser
@@ -62,6 +64,8 @@
 19	FeriteParser
 20	DiffParser
 21	VhdlParser
+22	LuaParser
+23	JavaScriptParser
 */
 #endif	/* _PARSERS_H */
 

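For context, PARSER_LIST is just a comma-separated list of constructor functions, each returning a parserDefinition* (exactly what JavaScriptParser() and LuaParser() above do), and the order of the entries matches the langType numbering in the comment (22 for LuaParser, 23 for JavaScriptParser). Below is a minimal sketch of how such a macro is typically consumed in ctags-derived code; the typedef and the table use illustrative names and are not taken from the tagmanager sources:

#include <stddef.h>

#include "general.h"	/* the tagmanager sources expect this header first */
#include "parse.h"		/* parserDefinition */
#include "parsers.h"	/* PARSER_LIST */

/* Function type of a parser constructor; writing it once in front of
 * PARSER_LIST declares every constructor named in the macro. */
typedef parserDefinition *parser_ctor_fn (void);
parser_ctor_fn PARSER_LIST;

/* Expanding the macro again inside braces builds the table of built-in
 * parsers in the same order as the langType numbering above. */
static parser_ctor_fn *builtin_parsers[] = { PARSER_LIST };

static size_t builtin_parser_count (void)
{
	return sizeof (builtin_parsers) / sizeof (builtin_parsers[0]);
}

Seen this way, adding a parser to the tagmanager only needs the new source file (js.c, lua.c), a link entry in Makefile.am/makefile.win32, and the two additions to PARSER_LIST shown in this hunk.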
