[geany/geany] ccb15a: Add the Go ctags parser

Jiří Techet git-noreply at xxxxx
Sun Nov 30 00:35:00 UTC 2014


Branch:      refs/heads/master
Author:      Jiří Techet <techet at gmail.com>
Committer:   Jiří Techet <techet at gmail.com>
Date:        Sun, 30 Nov 2014 00:35:00 UTC
Commit:      ccb15a31be106273a19a6c3d4dfa29f3adbb17a8
             https://github.com/geany/geany/commit/ccb15a31be106273a19a6c3d4dfa29f3adbb17a8

Log Message:
-----------
Add the Go ctags parser

Make Go one of the built-in filetypes, add the parser, and update the related
source and config files. While there, remove Rust from [Groups] in
filetype_extensions.conf because it is already a built-in filetype as well.

The parser itself is stolen from the fishman/ctags repo.
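
For illustration only, and not part of the commit: a minimal, hypothetical Go
file showing the kinds of symbols the new parser reports, one top-level
declaration per tag kind in the GoKinds table of tagmanager/ctags/go.c
(package, function, constant, type and variable), which the symbols.c change
below groups in the symbol tree.

    // Hypothetical sample: each top-level declaration maps to one tag kind
    // (package/"namespace", constant/"macro", type/"typedef", variable,
    // function).
    package mypkg // reported as the package

    const Answer = 42 // reported as a constant

    type Celsius float64 // reported as a type

    var Threshold Celsius = 36.6 // reported as a variable

    // ToFahrenheit is reported as a function.
    func ToFahrenheit(c Celsius) float64 {
        return float64(c)*9/5 + 32
    }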


Modified Paths:
--------------
    data/Makefile.am
    data/filetype_extensions.conf
    data/filetypes.go
    src/filetypes.c
    src/filetypes.h
    src/highlighting.c
    src/highlightingmappings.h
    src/symbols.c
    tagmanager/ctags/Makefile.am
    tagmanager/ctags/go.c
    tagmanager/ctags/makefile.win32
    tagmanager/ctags/parsers.h
    tagmanager/src/tm_parser.h
    tests/ctags/Makefile.am
    tests/ctags/test.go
    tests/ctags/test.go.tags
    wscript

Modified: data/Makefile.am
2 lines changed, 1 insertions(+), 1 deletions(-)
===================================================================
@@ -33,7 +33,7 @@ filetypes = \
 	filetypes.freebasic \
 	filetypes.Genie.conf \
 	filetypes.glsl \
-	filetypes.Go.conf \
+	filetypes.go \
 	filetypes.Graphviz.conf \
 	filetypes.haskell \
 	filetypes.haxe \


Modified: data/filetype_extensions.conf
2 lines changed, 1 insertions(+), 1 deletions(-)
===================================================================
@@ -69,7 +69,7 @@ None=*;
 
 # Note: restarting is required after editing groups
 [Groups]
-Programming=Clojure;CUDA;Cython;Genie;Go;Rust;Scala;
+Programming=Clojure;CUDA;Cython;Genie;Scala;
 Script=Graphviz;
 Markup=
 Misc=


Modified: data/filetypes.go
1 lines changed, 0 insertions(+), 1 deletions(-)
===================================================================
@@ -13,7 +13,6 @@ docComment=a addindex addtogroup anchor arg attention author authors b brief bug
 [lexer_properties=C]
 
 [settings]
-lexer_filetype=C
 
 # default extension used when saving files
 extension=go


Modified: src/filetypes.c
1 lines changed, 1 insertions(+), 0 deletions(-)
===================================================================
@@ -190,6 +190,7 @@ static void init_builtin_filetypes(void)
 	FT_INIT( BATCH,      NONE,         "Batch",            NULL,                      SCRIPT,      SCRIPT   );
 	FT_INIT( POWERSHELL, NONE,         "PowerShell",       NULL,                      SOURCE_FILE, SCRIPT   );
 	FT_INIT( RUST,       RUST,         "Rust",             NULL,                      SOURCE_FILE, COMPILED );
+	FT_INIT( GO,         GO,           "Go",               NULL,                      SOURCE_FILE, COMPILED );
 }
 
 


Modified: src/filetypes.h
1 lines changed, 1 insertions(+), 0 deletions(-)
===================================================================
@@ -100,6 +100,7 @@ typedef enum
 	GEANY_FILETYPES_BATCH,
 	GEANY_FILETYPES_POWERSHELL,
 	GEANY_FILETYPES_RUST,
+	GEANY_FILETYPES_GO,
 	/* ^ append items here */
 	GEANY_MAX_BUILT_IN_FILETYPES	/* Don't use this, use filetypes_array->len instead */
 }


Modified: src/highlighting.c
2 lines changed, 2 insertions(+), 0 deletions(-)
===================================================================
@@ -1029,6 +1029,7 @@ void highlighting_init_styles(guint filetype_idx, GKeyFile *config, GKeyFile *co
 		init_styleset_case(F77);
 		init_styleset_case(FORTH);
 		init_styleset_case(FORTRAN);
+		init_styleset_case(GO);
 		init_styleset_case(HASKELL);
 		init_styleset_case(HAXE);
 		init_styleset_case(AS);
@@ -1112,6 +1113,7 @@ void highlighting_set_styles(ScintillaObject *sci, GeanyFiletype *ft)
 		styleset_case(F77);
 		styleset_case(FORTH);
 		styleset_case(FORTRAN);
+		styleset_case(GO);
 		styleset_case(HASKELL);
 		styleset_case(HAXE);
 		styleset_case(AS);


Modified: src/highlightingmappings.h
7 lines changed, 7 insertions(+), 0 deletions(-)
===================================================================
@@ -624,6 +624,13 @@ static const HLKeyword highlighting_keywords_FORTH[] =
 #define highlighting_properties_FORTRAN		highlighting_properties_F77
 
 
+/* Go */
+#define highlighting_lexer_GO		SCLEX_CPP
+#define highlighting_styles_GO		highlighting_styles_C
+#define highlighting_keywords_GO	highlighting_keywords_C
+#define highlighting_properties_GO	highlighting_properties_C
+
+
 /* Haskell */
 #define highlighting_lexer_HASKELL			SCLEX_HASKELL
 static const HLStyle highlighting_styles_HASKELL[] =


Modified: src/symbols.c
12 lines changed, 12 insertions(+), 0 deletions(-)
===================================================================
@@ -762,6 +762,18 @@ static void add_top_level_items(GeanyDocument *doc)
 				NULL);
 			break;
 		}
+		case GEANY_FILETYPES_GO:
+		{
+			tag_list_add_groups(tag_store,
+				&(tv_iters.tag_namespace), _("Package"), "classviewer-namespace",
+				&(tv_iters.tag_function), _("Functions"), "classviewer-method",
+				&(tv_iters.tag_macro), _("Constants"), "classviewer-macro",
+				&(tv_iters.tag_type), _("Types"), "classviewer-struct",
+				&(tv_iters.tag_variable), _("Variables"), "classviewer-var",
+				&(tv_iters.tag_other), _("Other"), "classviewer-other", NULL,
+				NULL);
+			break;
+		}
 		case GEANY_FILETYPES_PERL:
 		{
 			tag_list_add_groups(tag_store,


Modified: tagmanager/ctags/Makefile.am
1 lines changed, 1 insertions(+), 0 deletions(-)
===================================================================
@@ -24,6 +24,7 @@ parsers = \
 	diff.c \
 	docbook.c \
 	fortran.c \
+	go.c \
 	haskell.c \
 	haxe.c \
 	html.c \


Modified: tagmanager/ctags/go.c
687 lines changed, 687 insertions(+), 0 deletions(-)
===================================================================
@@ -0,0 +1,687 @@
+/*
+*   INCLUDE FILES
+*/
+#include "general.h"        /* must always come first */
+
+#include "entry.h"
+#include "keyword.h"
+#include "read.h"
+#include "main.h"
+#include "vstring.h"
+#include "options.h"
+
+/*
+ *	 MACROS
+ */
+#define isType(token,t) (boolean) ((token)->type == (t))
+#define isKeyword(token,k) (boolean) ((token)->keyword == (k))
+
+/*
+ *	 DATA DECLARATIONS
+ */
+
+typedef enum eKeywordId {
+	KEYWORD_NONE = -1,
+	KEYWORD_package,
+	KEYWORD_import,
+	KEYWORD_const,
+	KEYWORD_type,
+	KEYWORD_var,
+	KEYWORD_func,
+	KEYWORD_struct,
+	KEYWORD_interface,
+	KEYWORD_map,
+	KEYWORD_chan
+} keywordId;
+
+/*  Used to determine whether keyword is valid for the current language and
+ *  what its ID is.
+ */
+typedef struct sKeywordDesc {
+	const char *name;
+	keywordId id;
+} keywordDesc;
+
+typedef enum eTokenType {
+	TOKEN_NONE = -1,
+	// Token not important for top-level Go parsing
+	TOKEN_OTHER,
+	TOKEN_KEYWORD,
+	TOKEN_IDENTIFIER,
+	TOKEN_STRING,
+	TOKEN_OPEN_PAREN,
+	TOKEN_CLOSE_PAREN,
+	TOKEN_OPEN_CURLY,
+	TOKEN_CLOSE_CURLY,
+	TOKEN_OPEN_SQUARE,
+	TOKEN_CLOSE_SQUARE,
+	TOKEN_SEMICOLON,
+	TOKEN_STAR,
+	TOKEN_LEFT_ARROW,
+	TOKEN_DOT,
+	TOKEN_COMMA,
+	TOKEN_EOF
+} tokenType;
+
+typedef struct sTokenInfo {
+	tokenType type;
+	keywordId keyword;
+	vString *string;		/* the name of the token */
+	unsigned long lineNumber;	/* line number of tag */
+	MIOPos filePosition;		/* file position of line containing name */
+} tokenInfo;
+
+/*
+*   DATA DEFINITIONS
+*/
+
+static int Lang_go;
+static vString *scope;
+
+typedef enum {
+	GOTAG_UNDEFINED = -1,
+	GOTAG_PACKAGE,
+	GOTAG_FUNCTION,
+	GOTAG_CONST,
+	GOTAG_TYPE,
+	GOTAG_VAR,
+} goKind;
+
+static kindOption GoKinds[] = {
+	{TRUE, 'p', "namespace", "packages"},
+	{TRUE, 'f', "function", "functions"},
+	{TRUE, 'c', "macro", "constants"},
+	{TRUE, 't', "typedef", "types"},
+	{TRUE, 'v', "variable", "variables"}
+};
+
+static keywordDesc GoKeywordTable[] = {
+	{"package", KEYWORD_package},
+	{"import", KEYWORD_import},
+	{"const", KEYWORD_const},
+	{"type", KEYWORD_type},
+	{"var", KEYWORD_var},
+	{"func", KEYWORD_func},
+	{"struct", KEYWORD_struct},
+	{"interface", KEYWORD_interface},
+	{"map", KEYWORD_map},
+	{"chan", KEYWORD_chan}
+};
+
+/*
+*   FUNCTION DEFINITIONS
+*/
+
+// XXX UTF-8
+static boolean isStartIdentChar (const int c)
+{
+	return (boolean)
+		(isalpha (c) ||  c == '_' || c > 128);
+}
+
+static boolean isIdentChar (const int c)
+{
+	return (boolean)
+		(isStartIdentChar (c) || isdigit (c));
+}
+
+static void initialize (const langType language)
+{
+	size_t i;
+	const size_t count =
+		sizeof (GoKeywordTable) / sizeof (GoKeywordTable[0]);
+	Lang_go = language;
+	for (i = 0; i < count; ++i)
+	{
+		const keywordDesc *const p = &GoKeywordTable[i];
+		addKeyword (p->name, language, (int) p->id);
+	}
+}
+
+static tokenInfo *newToken (void)
+{
+	tokenInfo *const token = xMalloc (1, tokenInfo);
+	token->type = TOKEN_NONE;
+	token->keyword = KEYWORD_NONE;
+	token->string = vStringNew ();
+	token->lineNumber = getSourceLineNumber ();
+	token->filePosition = getInputFilePosition ();
+	return token;
+}
+
+static void deleteToken (tokenInfo * const token)
+{
+	if (token != NULL)
+	{
+		vStringDelete (token->string);
+		eFree (token);
+	}
+}
+
+/*
+ *   Parsing functions
+ */
+
+static void parseString (vString *const string, const int delimiter)
+{
+	boolean end = FALSE;
+	while (!end)
+	{
+		int c = fileGetc ();
+		if (c == EOF)
+			end = TRUE;
+		else if (c == '\\' && delimiter != '`')
+		{
+			c = fileGetc ();
+			if (c != '\'' && c != '\"')
+				vStringPut (string, '\\');
+			vStringPut (string, c);
+		}
+		else if (c == delimiter)
+			end = TRUE;
+		else
+			vStringPut (string, c);
+	}
+	vStringTerminate (string);
+}
+
+static void parseIdentifier (vString *const string, const int firstChar)
+{
+	int c = firstChar;
+	do
+	{
+		vStringPut (string, c);
+		c = fileGetc ();
+	} while (isIdentChar (c));
+	vStringTerminate (string);
+	fileUngetc (c);		/* always unget, LF might add a semicolon */
+}
+
+static void readToken (tokenInfo *const token)
+{
+	int c;
+	static tokenType lastTokenType = TOKEN_NONE;
+
+	token->type = TOKEN_NONE;
+	token->keyword = KEYWORD_NONE;
+	vStringClear (token->string);
+
+getNextChar:
+	do
+	{
+		c = fileGetc ();
+		token->lineNumber = getSourceLineNumber ();
+		token->filePosition = getInputFilePosition ();
+		if (c == '\n' && (lastTokenType == TOKEN_IDENTIFIER ||
+						  lastTokenType == TOKEN_STRING ||
+						  lastTokenType == TOKEN_OTHER ||
+						  lastTokenType == TOKEN_CLOSE_PAREN ||
+						  lastTokenType == TOKEN_CLOSE_CURLY ||
+						  lastTokenType == TOKEN_CLOSE_SQUARE))
+		{
+			token->type = TOKEN_SEMICOLON;
+			goto done;
+		}
+	}
+	while (c == '\t'  ||  c == ' ' ||  c == '\r' || c == '\n');
+
+	switch (c)
+	{
+		case EOF:
+			token->type = TOKEN_EOF;
+			break;
+
+		case ';':
+			token->type = TOKEN_SEMICOLON;
+			break;
+
+		case '/':
+			{
+				boolean hasNewline = FALSE;
+				int d = fileGetc ();
+				switch (d)
+				{
+					case '/':
+						fileSkipToCharacter ('\n');
+						/* Line comments start with the
+						 * character sequence // and
+						 * continue through the next
+						 * newline. A line comment acts
+						 * like a newline.  */
+						fileUngetc ('\n');
+						goto getNextChar;
+					case '*':
+						do
+						{
+							do
+							{
+								d = fileGetc ();
+								if (d == '\n')
+								{
+									hasNewline = TRUE;
+								}
+							} while (d != EOF && d != '*');
+
+							c = fileGetc ();
+							if (c == '/')
+								break;
+							else
+								fileUngetc (c);
+						} while (c != EOF && c != '\0');
+
+						fileUngetc (hasNewline ? '\n' : ' ');
+						goto getNextChar;
+					default:
+						token->type = TOKEN_OTHER;
+						fileUngetc (d);
+						break;
+				}
+			}
+			break;
+
+		case '"':
+		case '\'':
+		case '`':
+			token->type = TOKEN_STRING;
+			parseString (token->string, c);
+			token->lineNumber = getSourceLineNumber ();
+			token->filePosition = getInputFilePosition ();
+			break;
+
+		case '<':
+			{
+				int d = fileGetc ();
+				if (d == '-')
+					token->type = TOKEN_LEFT_ARROW;
+				else
+				{
+					fileUngetc (d);
+					token->type = TOKEN_OTHER;
+				}
+			}
+			break;
+
+		case '(':
+			token->type = TOKEN_OPEN_PAREN;
+			break;
+
+		case ')':
+			token->type = TOKEN_CLOSE_PAREN;
+			break;
+
+		case '{':
+			token->type = TOKEN_OPEN_CURLY;
+			break;
+
+		case '}':
+			token->type = TOKEN_CLOSE_CURLY;
+			break;
+
+		case '[':
+			token->type = TOKEN_OPEN_SQUARE;
+			break;
+
+		case ']':
+			token->type = TOKEN_CLOSE_SQUARE;
+			break;
+
+		case '*':
+			token->type = TOKEN_STAR;
+			break;
+
+		case '.':
+			token->type = TOKEN_DOT;
+			break;
+
+		case ',':
+			token->type = TOKEN_COMMA;
+			break;
+
+		default:
+			if (isStartIdentChar (c))
+			{
+				parseIdentifier (token->string, c);
+				token->lineNumber = getSourceLineNumber ();
+				token->filePosition = getInputFilePosition ();
+				token->keyword = lookupKeyword (vStringValue (token->string), Lang_go);
+				if (isKeyword (token, KEYWORD_NONE))
+					token->type = TOKEN_IDENTIFIER;
+				else
+					token->type = TOKEN_KEYWORD;
+			}
+			else
+				token->type = TOKEN_OTHER;
+			break;
+	}
+
+done:
+	lastTokenType = token->type;
+}
+
+static void skipToMatched (tokenInfo *const token)
+{
+	int nest_level = 0;
+	tokenType open_token;
+	tokenType close_token;
+
+	switch (token->type)
+	{
+		case TOKEN_OPEN_PAREN:
+			open_token = TOKEN_OPEN_PAREN;
+			close_token = TOKEN_CLOSE_PAREN;
+			break;
+		case TOKEN_OPEN_CURLY:
+			open_token = TOKEN_OPEN_CURLY;
+			close_token = TOKEN_CLOSE_CURLY;
+			break;
+		case TOKEN_OPEN_SQUARE:
+			open_token = TOKEN_OPEN_SQUARE;
+			close_token = TOKEN_CLOSE_SQUARE;
+			break;
+		default:
+			return;
+	}
+
+	/*
+	 * This routine will skip to a matching closing token.
+	 * It will also handle nested tokens like the (, ) below.
+	 *   (  name varchar(30), text binary(10)  )
+	 */
+	if (isType (token, open_token))
+	{
+		nest_level++;
+		while (!(isType (token, close_token) && (nest_level == 0)) &&
+			   !isType (token, TOKEN_EOF))
+		{
+			readToken (token);
+			if (isType (token, open_token))
+			{
+				nest_level++;
+			}
+			if (isType (token, close_token))
+			{
+				if (nest_level > 0)
+				{
+					nest_level--;
+				}
+			}
+		}
+		readToken (token);
+	}
+}
+
+static void skipType (tokenInfo *const token)
+{
+again:
+	// Type      = TypeName | TypeLit | "(" Type ")" .
+	// Skips also function multiple return values "(" Type {"," Type} ")"
+	if (isType (token, TOKEN_OPEN_PAREN))
+	{
+		skipToMatched (token);
+		return;
+	}
+
+	// TypeName  = QualifiedIdent.
+	// QualifiedIdent = [ PackageName "." ] identifier .
+	// PackageName    = identifier .
+	if (isType (token, TOKEN_IDENTIFIER))
+	{
+		readToken (token);
+		if (isType (token, TOKEN_DOT))
+		{
+			readToken (token);
+			if (isType (token, TOKEN_IDENTIFIER))
+				readToken (token);
+		}
+		return;
+	}
+
+	// StructType     = "struct" "{" { FieldDecl ";" } "}"
+	// InterfaceType      = "interface" "{" { MethodSpec ";" } "}" .
+	if (isKeyword (token, KEYWORD_struct) || isKeyword (token, KEYWORD_interface))
+	{
+		readToken (token);
+		// skip over "{}"
+		skipToMatched (token);
+		return;
+	}
+
+	// ArrayType   = "[" ArrayLength "]" ElementType .
+	// SliceType = "[" "]" ElementType .
+	// ElementType = Type .
+	if (isType (token, TOKEN_OPEN_SQUARE))
+	{
+		skipToMatched (token);
+		goto again;
+	}
+
+	// PointerType = "*" BaseType .
+	// BaseType = Type .
+	// ChannelType = ( "chan" [ "<-" ] | "<-" "chan" ) ElementType .
+	if (isType (token, TOKEN_STAR) || isKeyword (token, KEYWORD_chan) || isType (token, TOKEN_LEFT_ARROW))
+	{
+		readToken (token);
+		goto again;
+	}
+
+	// MapType     = "map" "[" KeyType "]" ElementType .
+	// KeyType     = Type .
+	if (isKeyword (token, KEYWORD_map))
+	{
+		readToken (token);
+		// skip over "[]"
+		skipToMatched (token);
+		goto again;
+	}
+
+	// FunctionType   = "func" Signature .
+	// Signature      = Parameters [ Result ] .
+	// Result         = Parameters | Type .
+	// Parameters     = "(" [ ParameterList [ "," ] ] ")" .
+	if (isKeyword (token, KEYWORD_func))
+	{
+		readToken (token);
+		// Parameters, skip over "()"
+		skipToMatched (token);
+		// Result is parameters or type or nothing.  skipType treats anything
+		// surrounded by parentheses as a type, and does nothing if what
+		// follows is not a type.
+		goto again;
+	}
+}
+
+static void makeTag (tokenInfo *const token, const goKind kind)
+{
+	const char *const name = vStringValue (token->string);
+
+	tagEntryInfo e;
+	initTagEntry (&e, name);
+
+	if (!GoKinds [kind].enabled)
+		return;
+
+	e.lineNumber = token->lineNumber;
+	e.filePosition = token->filePosition;
+	e.kindName = GoKinds [kind].name;
+	e.kind = GoKinds [kind].letter;
+
+	makeTagEntry (&e);
+
+	if (scope && Option.include.qualifiedTags)
+	{
+		vString *qualifiedName = vStringNew ();
+		vStringCopy (qualifiedName, scope);
+		vStringCatS (qualifiedName, ".");
+		vStringCat (qualifiedName, token->string);
+		e.name = vStringValue (qualifiedName);
+		makeTagEntry (&e);
+		vStringDelete (qualifiedName);
+	}
+}
+
+static void parsePackage (tokenInfo *const token)
+{
+	tokenInfo *const name = newToken ();
+
+	readToken (name);
+	if (isType (name, TOKEN_IDENTIFIER))
+	{
+		makeTag (name, GOTAG_PACKAGE);
+		if (!scope && Option.include.qualifiedTags)
+		{
+			scope = vStringNew ();
+			vStringCopy (scope, name->string);
+		}
+	}
+
+	deleteToken (name);
+}
+
+static void parseFunctionOrMethod (tokenInfo *const token)
+{
+	// FunctionDecl = "func" identifier Signature [ Body ] .
+	// Body         = Block.
+	//
+	// MethodDecl   = "func" Receiver MethodName Signature [ Body ] .
+	// Receiver     = "(" [ identifier ] [ "*" ] BaseTypeName ")" .
+	// BaseTypeName = identifier .
+	tokenInfo *const name = newToken ();
+
+	// Skip over receiver.
+	readToken (name);
+	if (isType (name, TOKEN_OPEN_PAREN))
+		skipToMatched (name);
+
+	if (isType (name, TOKEN_IDENTIFIER))
+	{
+		// Skip over parameters.
+		readToken (token);
+		skipToMatched (token);
+
+		// Skip over result.
+		skipType (token);
+
+		// Skip over function body.
+		if (isType (token, TOKEN_OPEN_CURLY))
+			skipToMatched (token);
+
+		makeTag (name, GOTAG_FUNCTION);
+	}
+
+	deleteToken (name);
+}
+
+static void parseConstTypeVar (tokenInfo *const token, goKind kind)
+{
+	// ConstDecl      = "const" ( ConstSpec | "(" { ConstSpec ";" } ")" ) .
+	// ConstSpec      = IdentifierList [ [ Type ] "=" ExpressionList ] .
+	// IdentifierList = identifier { "," identifier } .
+	// ExpressionList = Expression { "," Expression } .
+	// TypeDecl     = "type" ( TypeSpec | "(" { TypeSpec ";" } ")" ) .
+	// TypeSpec     = identifier Type .
+	// VarDecl     = "var" ( VarSpec | "(" { VarSpec ";" } ")" ) .
+	// VarSpec     = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
+	tokenInfo *const name = newToken ();
+	boolean usesParens = FALSE;
+
+	readToken (name);
+
+	if (isType (name, TOKEN_OPEN_PAREN))
+	{
+		usesParens = TRUE;
+		readToken (name);
+	}
+
+again:
+	while (1)
+	{
+		if (isType (name, TOKEN_IDENTIFIER))
+		{
+			makeTag (name, kind);
+			readToken (token);
+		}
+		if (!isType (token, TOKEN_COMMA))
+			break;
+		readToken (name);
+	}
+
+	skipType (token);
+	while (!isType (token, TOKEN_SEMICOLON) && !isType (token, TOKEN_CLOSE_PAREN)
+			&& !isType (token, TOKEN_EOF))
+	{
+		readToken (token);
+		skipToMatched (token);
+	}
+
+	if (usesParens)
+	{
+		if (!isType (token, TOKEN_CLOSE_PAREN)) // we are at TOKEN_SEMICOLON
+		{
+			readToken (name);
+			if (!isType (name, TOKEN_CLOSE_PAREN) && !isType (name, TOKEN_EOF))
+				goto again;
+		}
+	}
+
+	deleteToken (name);
+}
+
+static void parseGoFile (tokenInfo *const token)
+{
+	do
+	{
+		readToken (token);
+
+		if (isType (token, TOKEN_KEYWORD))
+		{
+			switch (token->keyword)
+			{
+				case KEYWORD_package:
+					parsePackage (token);
+					break;
+				case KEYWORD_func:
+					parseFunctionOrMethod (token);
+					break;
+				case KEYWORD_const:
+					parseConstTypeVar (token, GOTAG_CONST);
+					break;
+				case KEYWORD_type:
+					parseConstTypeVar (token, GOTAG_TYPE);
+					break;
+				case KEYWORD_var:
+					parseConstTypeVar (token, GOTAG_VAR);
+					break;
+				default:
+					break;
+			}
+		}
+		else if (isType (token, TOKEN_OPEN_PAREN) || isType (token, TOKEN_OPEN_CURLY) ||
+			isType (token, TOKEN_OPEN_SQUARE))
+		{
+			skipToMatched (token);
+		}
+	} while (token->type != TOKEN_EOF);
+}
+
+static void findGoTags (void)
+{
+	tokenInfo *const token = newToken ();
+
+	parseGoFile (token);
+
+	deleteToken (token);
+	vStringDelete (scope);
+	scope = NULL;
+}
+
+extern parserDefinition *GoParser (void)
+{
+	static const char *const extensions[] = { "go", NULL };
+	parserDefinition *def = parserNew ("Go");
+	def->kinds = GoKinds;
+	def->kindCount = KIND_COUNT (GoKinds);
+	def->extensions = extensions;
+	def->parser = findGoTags;
+	def->initialize = initialize;
+	return def;
+}
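
For context only, and not part of the commit: the newline handling in
readToken() above is a rough approximation of Go's automatic semicolon
insertion, which is what lets the parser treat line ends as declaration
terminators. A small, hypothetical Go sketch of the rule being approximated:

    // Go inserts a ";" at a newline when the line's last token is an
    // identifier, a literal, one of ")", "]", "}", "++", "--", or the
    // keywords break/continue/fallthrough/return; readToken() mimics this
    // by checking the previous token type before turning '\n' into
    // TOKEN_SEMICOLON.
    package asidemo

    const A = 1 // the literal "1" ends the line, so this reads as "const A = 1;"

    var (
        b int    // the identifier "int" ends the line, terminating this VarSpec
        c string // likewise
    )

    func f() {} // the closing "}" ends the line, terminating the declaration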


Modified: tagmanager/ctags/makefile.win32
2 lines changed, 1 insertions(+), 1 deletions(-)
===================================================================
@@ -49,7 +49,7 @@ actionscript.o nsis.o objc.o \
 haskell.o haxe.o html.o python.o lregex.o asciidoc.o rest.o sh.o ctags.o entry.o get.o keyword.o nestlevel.o \
 options.o \
 parse.o basic.o read.o sort.o strlist.o latex.o markdown.o matlab.o docbook.o tcl.o ruby.o rust.o asm.o sql.o txt2tags.o css.o \
-vstring.o r.o
+vstring.o r.o go.o
 	$(AR) rc $@ $^
 	$(RANLIB) $@
 


Modified: tagmanager/ctags/parsers.h
3 lines changed, 2 insertions(+), 1 deletions(-)
===================================================================
@@ -61,7 +61,8 @@
     ObjcParser, \
     AsciidocParser, \
     AbaqusParser, \
-    RustParser
+    RustParser, \
+    GoParser
 
 #endif	/* _PARSERS_H */
 


Modified: tagmanager/src/tm_parser.h
1 lines changed, 1 insertions(+), 0 deletions(-)
===================================================================
@@ -68,6 +68,7 @@ typedef enum
 	TM_PARSER_ASCIIDOC,
 	TM_PARSER_ABAQUS,
 	TM_PARSER_RUST,
+	TM_PARSER_GO,
 	TM_PARSER_COUNT
 } TMParserType;
 


Modified: tests/ctags/Makefile.am
1 lines changed, 1 insertions(+), 0 deletions(-)
===================================================================
@@ -242,6 +242,7 @@ test_sources = \
 	strings.rb						\
 	structure.f						\
 	tabindent.py					\
+	test.go							\
 	test.py							\
 	test.vhd						\
 	test_input.rs					\


Modified: tests/ctags/test.go
47 lines changed, 47 insertions(+), 0 deletions(-)
===================================================================
@@ -0,0 +1,47 @@
+package main
+
+import "fmt"
+
+type (
+	T1 map[string]int
+	T2 <-chan float32
+	T3 chan []string
+	T4 chan<- *[12]string
+	T5 interface {
+		Reader()
+		Writer()
+		foo()
+	}
+)
+
+type T6 struct {
+	a, b, c, d int
+	e float32
+	//ignored int
+}
+
+const (A = iota;B;C;
+	D = iota << (1 + iota*2)
+	E
+	F=3.14*(1+2*3)/34e7;I=1)
+
+type (T7 func (a struct{_ int; _ float32}, b int) (int, map[string]int);T8 float32)
+
+var (a, b, c int
+d T5
+e T4
+f interface{})
+
+func f1() {};func f2() {};type/*no newline here*/T9 int/*var ignored int
+const ignored int*/const (G=6); var g int
+
+func (t *T1) f3() (a, b int){
+	return 1, 2
+}; var h int
+
+func (tt * T7) f4(a func () func ()) (func (), int) {return func (){}, 1};func f5(){};const H=1
+
+func main() {
+	go func (){}()
+	fmt.Println("Hello, 世界")
+}


Modified: tests/ctags/test.go.tags
34 lines changed, 34 insertions(+), 0 deletions(-)
===================================================================
@@ -0,0 +1,34 @@
+# format=tagmanager
+A�65536�0
+B�65536�0
+C�65536�0
+D�65536�0
+E�65536�0
+F�65536�0
+G�65536�0
+H�65536�0
+I�65536�0
+T1�4096�0
+T2�4096�0
+T3�4096�0
+T4�4096�0
+T5�4096�0
+T6�4096�0
+T7�4096�0
+T8�4096�0
+T9�4096�0
+a�16384�0
+b�16384�0
+c�16384�0
+d�16384�0
+e�16384�0
+f�16384�0
+f1�16�0
+f2�16�0
+f3�16�0
+f4�16�0
+f5�16�0
+g�16384�0
+h�16384�0
+main�16�0
+main�256�0


Modified: wscript
1 lines changed, 1 insertions(+), 0 deletions(-)
===================================================================
@@ -82,6 +82,7 @@ ctags_sources = set([
     'tagmanager/ctags/entry.c',
     'tagmanager/ctags/fortran.c',
     'tagmanager/ctags/get.c',
+    'tagmanager/ctags/go.c',
     'tagmanager/ctags/haskell.c',
     'tagmanager/ctags/haxe.c',
     'tagmanager/ctags/html.c',



