Branch:      refs/heads/master
Author:      Colomban Wendling <ban@herbesfolles.org>
Committer:   Colomban Wendling <ban@herbesfolles.org>
Date:        Tue, 29 Jul 2014 16:23:11 UTC
Commit:      aea113f1c5095fa28790f1c330f1c5220d813d59
             https://github.com/geany/geany/commit/aea113f1c5095fa28790f1c330f1c5220d813d59
Log Message:
-----------
Merge pull request #306 from SiegeLord/more_rust_updates
Modified Paths:
--------------
    data/filetypes.rust
    tagmanager/ctags/rust.c
    tests/ctags/test_input.rs
    tests/ctags/test_input.rs.tags
    tests/ctags/test_input2.rs
    tests/ctags/test_input2.rs.tags
Modified: data/filetypes.rust
2 lines changed, 1 insertions(+), 1 deletions(-)
===================================================================
@@ -22,7 +22,7 @@ lexerror=error
 [keywords]
 # all items must be in one line
-primary=alignof as be box break const continue crate do else enum extern false fn for if impl in let loop match mod mut offsetof once priv proc pub pure ref return self sizeof static struct super trait true type typeof unsafe unsized use while yield
+primary=alignof as be box break const continue crate do else enum extern false fn for if impl in let loop match mod mut offsetof once priv proc pub pure ref return self sizeof static struct super trait true type typeof unsafe unsized use virtual while yield
 secondary=bool char f32 f64 i16 i32 i64 i8 int str u16 u32 u64 u8 uint
 tertiary=Self
Modified: tagmanager/ctags/rust.c
108 lines changed, 75 insertions(+), 33 deletions(-)
===================================================================
@@ -24,7 +24,7 @@
 /*
 *   MACROS
 */
-#define MAX_STRING_LENGTH 64
+#define MAX_STRING_LENGTH 256
 
 /*
 *   DATA DECLARATIONS
@@ -117,9 +117,7 @@ static void writeCurTokenToStr (lexerState *lexer, vString *out_str)
             vStringCat(out_str, lexer->token_str);
             break;
         case TOKEN_STRING:
-            vStringPut(out_str, '"');
             vStringCat(out_str, lexer->token_str);
-            vStringPut(out_str, '"');
             break;
         case TOKEN_WHITESPACE:
             vStringPut(out_str, ' ');
@@ -152,6 +150,14 @@ static void advanceNChar (lexerState *lexer, int n)
         advanceChar(lexer);
 }
 
+/* Store the current character in lexerState::token_str if there is space
+ * (set by MAX_STRING_LENGTH), and then read the next character from the file */
+static void advanceAndStoreChar (lexerState *lexer)
+{
+    if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH)
+        vStringPut(lexer->token_str, (char) lexer->cur_c);
+    advanceChar(lexer);
+}
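
Note: advanceAndStoreChar() caps what it keeps at MAX_STRING_LENGTH, which the first hunk raises from 64 to 256. A plausible motivation (an assumption, not stated in the commit) is that token strings now hold whole literals and signature fragments verbatim, and those easily run past 64 characters. A hypothetical Rust item of that kind:

    // Hypothetical function, not taken from the test suite: its rendered
    // signature alone is well over 64 characters, so under the old cap the
    // stored tag text would have been cut off mid-argument.
    fn configure_renderer<'a>(window_title: &'a str, initial_width: u32, initial_height: u32, vsync: bool) -> Option<u32> {
        if vsync && window_title.len() > 0 { Some(initial_width * initial_height) } else { None }
    }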
 
 static boolean isWhitespace (int c)
 {
@@ -182,19 +188,30 @@ static void scanWhitespace (lexerState *lexer)
 }
 
 /* Normal line comments start with two /'s and continue until the next \n
- * (NOT any other newline character!). Additionally, a shebang in the beginning
- * of the file also counts as a line comment.
+ * (potentially after a \r). Additionally, a shebang in the beginning of the
+ * file also counts as a line comment as long as it is not this sequence: #![ .
  * Block comments start with / followed by a * and end with a * followed by a /.
  * Unlike in C/C++ they nest. */
 static void scanComments (lexerState *lexer)
 {
-    /* // or #! */
-    if (lexer->next_c == '/' || lexer->next_c == '!')
+    /* // */
+    if (lexer->next_c == '/')
     {
         advanceNChar(lexer, 2);
         while (lexer->cur_c != EOF && lexer->cur_c != '\n')
             advanceChar(lexer);
     }
+    /* #! */
+    else if (lexer->next_c == '!')
+    {
+        advanceNChar(lexer, 2);
+        /* If it is exactly #![ then it is not a comment, but an attribute */
+        if (lexer->cur_c == '[')
+            return;
+        while (lexer->cur_c != EOF && lexer->cur_c != '\n')
+            advanceChar(lexer);
+    }
+    /* block comment */
     else if (lexer->next_c == '*')
     {
         int level = 1;
@@ -224,8 +241,7 @@ static void scanIdentifier (lexerState *lexer)
     vStringClear(lexer->token_str);
     do
     {
-        vStringPut(lexer->token_str, (char) lexer->cur_c);
-        advanceChar(lexer);
+        advanceAndStoreChar(lexer);
     } while(lexer->cur_c != EOF && isIdentifierContinue(lexer->cur_c));
 }
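
Note: splitting the old //-or-#! branch is what lets a real shebang still be skipped as a comment while #![...] inner attributes are handed back to the parser. A small illustrative file (the interpreter name on the shebang line is made up) covering the cases the new code distinguishes:

    #!/usr/bin/env rust-runner  first-line shebang, skipped like a comment
    #![allow(dead_code)]  // starts with #![ so it is an attribute, not a comment

    // ordinary line comment, ends at the newline (possibly preceded by \r)
    /* block comments /* nest */ in Rust, so the outer one needs its own closer */
    fn tagged_after_the_comments() {}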
@@ -237,16 +253,14 @@ static void scanIdentifier (lexerState *lexer)
 static void scanString (lexerState *lexer)
 {
     vStringClear(lexer->token_str);
-    advanceChar(lexer);
+    advanceAndStoreChar(lexer);
     while (lexer->cur_c != EOF && lexer->cur_c != '"')
     {
         if (lexer->cur_c == '\\' && lexer->next_c == '"')
-            advanceChar(lexer);
-        if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH)
-            vStringPut(lexer->token_str, (char) lexer->cur_c);
-        advanceChar(lexer);
+            advanceAndStoreChar(lexer);
+        advanceAndStoreChar(lexer);
     }
-    advanceChar(lexer);
+    advanceAndStoreChar(lexer);
 }
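
Note: with the quotes now kept inside token_str (writeCurTokenToStr no longer re-adds them), scanString's only special case is the \" escape, so an embedded quote does not end the token early. A minimal example of a literal that relies on it:

    // The embedded \" must not terminate the string token; the literal is
    // carried into the signature exactly as written, delimiters included.
    fn default_greeting() -> &'static str {
        "she said \"hello\" and left"
    }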
 
 /* Raw strings look like this: r"" or r##""## where the number of
@@ -255,52 +269,75 @@
 {
     size_t num_initial_hashes = 0;
     vStringClear(lexer->token_str);
-    advanceChar(lexer);
+    advanceAndStoreChar(lexer);
     /* Count how many leading hashes there are */
     while (lexer->cur_c == '#')
     {
         num_initial_hashes++;
-        advanceChar(lexer);
+        advanceAndStoreChar(lexer);
     }
     if (lexer->cur_c != '"')
         return;
-    advanceChar(lexer);
+    advanceAndStoreChar(lexer);
     while (lexer->cur_c != EOF)
     {
-        if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH)
-            vStringPut(lexer->token_str, (char) lexer->cur_c);
         /* Count how many trailing hashes there are. If the number is equal or more
          * than the number of leading hashes, break. */
         if (lexer->cur_c == '"')
         {
             size_t num_trailing_hashes = 0;
-            advanceChar(lexer);
+            advanceAndStoreChar(lexer);
             while (lexer->cur_c == '#' && num_trailing_hashes < num_initial_hashes)
             {
                 num_trailing_hashes++;
-                if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH)
-                    vStringPut(lexer->token_str, (char) lexer->cur_c);
-                advanceChar(lexer);
+                advanceAndStoreChar(lexer);
             }
             if (num_trailing_hashes == num_initial_hashes)
-            {
-                /* Strip the trailing hashes and quotes */
-                if (vStringLength(lexer->token_str) < MAX_STRING_LENGTH && vStringLength(lexer->token_str) > num_trailing_hashes + 1)
-                {
-                    lexer->token_str->length = vStringLength(lexer->token_str) - num_trailing_hashes - 1;
-                    lexer->token_str->buffer[lexer->token_str->length] = '\0';
-                }
                 break;
-            }
         }
         else
         {
-            advanceChar(lexer);
+            advanceAndStoreChar(lexer);
         }
     }
 }
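
Note: dropping the strip-the-delimiters step means raw strings are now stored with their r##"..."## wrappers intact, which is what the new preserve_string_delims test further below relies on for its extern r#"C"# ABI string. Two ordinary raw-string forms for reference (hypothetical values):

    fn raw_string_samples() {
        // no escapes are processed inside a raw string
        let plain = r"backslashes \n stay literal here";
        // the trailing hash count must match the leading one
        let hashed = r##"may contain "quotes" and lone # marks"##;
        let _ = (plain, hashed);
    }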
 
+/* This deals with character literals: 'n', '\n', '\uFFFF'; and lifetimes:
+ * 'lifetime. We'll use this approximate regexp for the literals:
+ * \' \\ [^']+ \' or \' [^'] \' or \' \\ \' \'. Either way, we'll treat this
+ * token as a string, so it gets preserved as is for function signatures with
+ * lifetimes. */
+static void scanCharacterOrLifetime (lexerState *lexer)
+{
+    vStringClear(lexer->token_str);
+    advanceAndStoreChar(lexer);
+
+    if (lexer->cur_c == '\\')
+    {
+        advanceAndStoreChar(lexer);
+        /* The \' \\ \' \' (literally '\'') case */
+        if (lexer->cur_c == '\'' && lexer->next_c == '\'')
+        {
+            advanceAndStoreChar(lexer);
+            advanceAndStoreChar(lexer);
+        }
+        /* The \' \\ [^']+ \' case */
+        else
+        {
+            while (lexer->cur_c != EOF && lexer->cur_c != '\'')
+                advanceAndStoreChar(lexer);
+        }
+    }
+    /* The \' [^'] \' case */
+    else if (lexer->cur_c != '\'' && lexer->next_c == '\'')
+    {
+        advanceAndStoreChar(lexer);
+        advanceAndStoreChar(lexer);
+    }
+    /* Otherwise it is malformed, or a lifetime */
+}
+
 /* Advances the parser one token, optionally skipping whitespace
  * (otherwise it is concatenated and returned as a single whitespace token).
  * Whitespace is needed to properly render function signatures. Unrecognized
@@ -343,6 +380,11 @@ static int advanceToken (lexerState *lexer, boolean skip_whitspace)
         scanRawString(lexer);
         return lexer->cur_token = TOKEN_STRING;
     }
+    else if (lexer->cur_c == '\'')
+    {
+        scanCharacterOrLifetime(lexer);
+        return lexer->cur_token = TOKEN_STRING;
+    }
     else if (isIdentifierStart(lexer->cur_c))
     {
         scanIdentifier(lexer);
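
Note: a single quote can open either a character literal or a lifetime, so scanCharacterOrLifetime() approximates the literal forms and otherwise leaves the token alone as a lifetime; both come back as TOKEN_STRING so signatures keep their lifetime parameters (see the new lifetime_and_char test below). A short hypothetical sketch of the inputs it has to tell apart:

    // lifetimes next to the literal shapes the scanner matches:
    // '\'' (escaped quote), '\\' (escaped backslash), '}' (plain character)
    fn shorter<'a>(left: &'a str, right: &'a str) -> &'a str {
        let _quote = '\'';
        let _slash = '\\';
        let _brace = '}';
        if left.len() < right.len() { left } else { right }
    }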
Modified: tests/ctags/test_input.rs
31 lines changed, 21 insertions(+), 10 deletions(-)
===================================================================
@@ -1,11 +1,22 @@
 #! fn ignored_in_comment() {}
-#[feature(globs)];
-#[feature(macro_rules)];
+#![feature(globs)]
+#![feature(macro_rules)]
 use std::*;
 use std::io::stdio::println;
 use test_input2::*;
 mod test_input2;
 
+fn lifetime_and_char<'lifetime>(_: &'lifetime int)
+{
+    let s = '"';
+    let s = '}';
+    let s = '\'';
+    let s = '\uffff';
+    fn not_hidden_by_char() {}
+}
+
+fn preserve_string_delims(_bar: extern r#"C"# fn()) {}
+
 pub struct A
 {
     foo: fn() -> int,
@@ -44,18 +55,18 @@ macro_rules! test_macro
 macro_rules! ignore (($($x:tt)*) => (()))
 
-fn yada(a:int,c:Foo,b:test_input2::fruit::SomeStruct) -> ~str {
-    a.to_str()
+fn yada(a:int,c:Foo,b:test_input2::fruit::SomeStruct) -> String {
+    a.to_string()
 }
 
 fn main() {
     use test_input2::fruit::*;
-    io::println(foo_bar_test_func(SomeStruct{red_value:1,green_value:2,blue_value:3},(4,5)).to_str());
+    io::println(foo_bar_test_func(SomeStruct{red_value:1,green_value:2,blue_value:3},(4,5)).to_string().as_slice());
     let a=Foo{foo_field_1:2};
     a.my_method(1);
     let c=a_cat(3);
     let d=Foo{foo_field_1:a.foo_field_1+2};
     a.test();
-    println(a.foo_field_1.to_str());
+    println(a.foo_field_1.to_string().as_slice());
     ignore!
     (
         fn ignored_inside_macro() {}
@@ -111,21 +122,21 @@ trait DoZ {
 
 impl Testable for Foo {
     fn test(&self) {
-        println(self.foo_field_1.to_str());
+        println(self.foo_field_1.to_string().as_slice());
     }
 
     fn test1(&self) {
-        println(self.foo_field_1.to_str());
+        println(self.foo_field_1.to_string().as_slice());
     }
 
     fn test2(&self) {
-        println(self.foo_field_1.to_str());
+        println(self.foo_field_1.to_string().as_slice());
     }
 }
 
 impl DoZ for Foo {
     fn do_z(&self) {
-        println(self.foo_field_1.to_str());
+        println(self.foo_field_1.to_string().as_slice());
     }
 }
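
Note (background, not part of the parser change itself): the test churn above tracks the mid-2014 Rust library move away from ~str towards String, so to_str() becomes to_string(), with as_slice() used where a string slice is still expected. A minimal sketch of the new spelling, using a hypothetical function name:

    // to_string() is the replacement the tests adopt for the removed to_str()
    fn label(value: i32) -> String {
        value.to_string()
    }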
Modified: tests/ctags/test_input.rs.tags
5 lines changed, 4 insertions(+), 1 deletions(-)
===================================================================
@@ -27,10 +27,13 @@ foo
 foo_field_1�8�Foo�0
 gfunc�16�<X:Testable+DoZ>(x:&X)�0
 ignore�65536�0
+lifetime_and_char�16�<'lifetime>(_: &'lifetime int)�0
 main�16�()�0
 my_method�128�(&self,_:int)�Foo�0
 nested�16�()�main�0
+not_hidden_by_char�16�()�lifetime_and_char�0
 only_field�8�S1�0
+preserve_string_delims�16�(_bar: extern r#"C"# fn())�0
 size�16384�0
 some2�16�(a:Animal)�0
 test�128�(&self)�Foo�0
@@ -46,4 +49,4 @@ test_macro
 x�8�Foo2�0
 x�8�TraitedStructTest�0
 y�8�Foo2�0
-yada�16�(a:int,c:Foo,b:test_input2::fruit::SomeStruct) -> ~str�0
+yada�16�(a:int,c:Foo,b:test_input2::fruit::SomeStruct) -> String�0
Modified: tests/ctags/test_input2.rs
3 lines changed, 2 insertions(+), 1 deletions(-)
===================================================================
@@ -1,6 +1,7 @@
-use std::io::stdio::println;
+#![cfg(not(test))]
 fn not_hashbang() {}
 
 pub fn foo_bar_test_func(apples:fruit::SomeStruct,(oranges,lemon):(int,int))->int{
+    use std::io::stdio::println;
     let some_var_name=2*oranges;
     let a=SomeLongStructName{v:0};
     println("a");println("b"); println("c");
Modified: tests/ctags/test_input2.rs.tags
1 lines changed, 1 insertions(+), 0 deletions(-)
===================================================================
@@ -14,6 +14,7 @@ granite
 green_value�8�fruit::SomeStruct�0
 limestone�16�()�mineral�0
 mineral�256�0
+not_hashbang�16�()�0
 red_value�8�fruit::SomeStruct�0
 v�8�SomeLongStructName�0
 veg�256�0
 
--------------
This E-Mail was brought to you by github_commit_mail.py (Source: https://github.com/geany/infrastructure).