Scans the input string and returns the next single language token.
# File lib/mcollective/matcher/scanner.rb, line 14
# Returns the next token found at @token_index in @arguments, as a
# [type, value] pair, or nil when the input is exhausted.
#
# Single-character tokens ("(", ")", "!", " ") are returned directly.
# The keywords "not", "and" and "or" are recognised by looking ahead a
# fixed number of characters and require a trailing space or "(" so
# that identifiers such as "nothing" are not mistaken for keywords;
# everything else is delegated to gen_statement.
def get_token
  if @token_index >= @arguments.size
    return nil
  end

  current = @arguments[@token_index]
  ahead1  = @arguments[@token_index + 1]
  ahead2  = @arguments[@token_index + 2]
  ahead3  = @arguments[@token_index + 3]

  case current
  when "(", ")"
    # Parentheses are their own token type and value.
    return current, current
  when "!"
    # "!" is shorthand for the "not" keyword.
    return "not", "not"
  when " "
    return " ", " "
  when "n"
    if (ahead1 == "o") && (ahead2 == "t") && ((ahead3 == " ") || (ahead3 == "("))
      # Skip past the remaining characters of the keyword.
      @token_index += 2
      return "not", "not"
    end
    gen_statement
  when "a"
    if (ahead1 == "n") && (ahead2 == "d") && ((ahead3 == " ") || (ahead3 == "("))
      @token_index += 2
      return "and", "and"
    end
    gen_statement
  when "o"
    if (ahead1 == "r") && ((ahead2 == " ") || (ahead2 == "("))
      @token_index += 1
      return "or", "or"
    end
    gen_statement
  else
    gen_statement
  end
end
Helper that scans forward from @token_index and generates a "statement", "fstatement" or "bad_token" token.
# File lib/mcollective/matcher/scanner.rb, line 63
# Scans forward from @token_index and builds a single statement token.
#
# Returns one of:
#   ["statement",  value]            - a plain fact/agent/class statement
#   ["fstatement", value]            - a function-call statement, e.g. foo('x').bar=1
#   ["bad_token",  [start, end]]     - the character range of a malformed token
#
# Relies on (and mutates) scanner state: @token_index, @white_spaces and
# @seperation_counter (sic - spelling preserved from the rest of the file).
def gen_statement
  func = false                 # set when the token looks like a function call
  current_token_value = ""     # accumulated characters of the token
  j = @token_index             # local scan cursor; @token_index updated at the end

  begin
    if (@arguments[j] == "/")
      # Token starts with "/": consume a bare regular expression up to the
      # next whitespace character (or end of input).
      begin
        current_token_value << @arguments[j]
        j += 1
      end until (j >= @arguments.size) || (@arguments[j] =~ /\s/)
    elsif (@arguments[j] =~ /=|<|>/)
      # Token starts with a comparison operator.
      # NOTE(review): this while condition is immediately false - the elsif
      # above just matched /=|<|>/ at position j - so the loop body never
      # runs. It looks like dead code; confirm before removing.
      while !(@arguments[j] =~ /=|<|>/)
        current_token_value << @arguments[j]
        j += 1
      end

      # Append the operator itself.
      current_token_value << @arguments[j]
      j += 1

      if @arguments[j] == "/"
        # Right-hand side is a regex: consume up to (and including) the
        # closing "/".
        begin
          current_token_value << @arguments[j]
          j += 1
          if @arguments[j] == "/"
            current_token_value << "/"
            break
          end
        end until (j >= @arguments.size) || (@arguments[j] =~ /\//)
      else
        # Right-hand side is a plain value: consume until space, ")" or
        # end of input.
        while (j < @arguments.size) && ((@arguments[j] != " ") && (@arguments[j] != ")"))
          current_token_value << @arguments[j]
          j += 1
        end
      end
    else
      # General case: fact/class/function statements.
      begin
        # Identify and tokenize regular expressions by ignoring everything between /'s
        if @arguments[j] == '/'
          current_token_value << '/'
          j+=1
          while(j < @arguments.size && @arguments[j] != '/')
            current_token_value << @arguments[j]
            j += 1
          end
          # Append the closing "/" if present (nil at end of input).
          current_token_value << @arguments[j] if @arguments[j]
          break
        end
        # A "(" right after the current character marks a function call;
        # be_greedy makes the next pass swallow everything up to ")".
        if @arguments[j+1] == "("
          func = true
          be_greedy = true
        end
        current_token_value << @arguments[j]
        if be_greedy
          # Consume the whole argument list, including the closing ")".
          while !(j+1 >= @arguments.size) && @arguments[j] != ')'
            j += 1
            current_token_value << @arguments[j]
          end
          j += 1
          be_greedy = false
        else
          j += 1
        end
        # A space followed by a bare class name (no comparison operator
        # ahead, and none just consumed) ends this token.
        if(@arguments[j] == ' ')
          break if(is_klass?(j) && !(@arguments[j-1] =~ /=|<|>/))
        end
        # Otherwise a space inside an unfinished comparison (fewer than two
        # separations seen, no operator in the value yet) is skipped via
        # lookahead so "foo = bar" tokenizes as one statement.
        if( (@arguments[j] == ' ') && (@seperation_counter < 2) && !(current_token_value.match(/^.+(=|<|>).+$/)) )
          if((index = lookahead(j)))
            j = index
          end
        end
      end until (j >= @arguments.size) || (@arguments[j] =~ /\s|\)/)
      @seperation_counter = 0
    end
  # NOTE(review): rescuing Exception (rather than StandardError) also traps
  # SignalException/SystemExit; kept as-is to preserve behavior.
  rescue Exception => e
    raise "An exception was raised while trying to tokenize '#{current_token_value} - #{e}'"
  end

  # Advance the scanner past this token, compensating for any whitespace
  # swallowed by lookahead (@white_spaces) - TODO confirm the -1 interplay
  # with get_token's own increment; preserved exactly from original.
  @token_index += current_token_value.size + @white_spaces - 1
  @white_spaces = 0

  # bar(  - unterminated function call
  if current_token_value.match(/.+?\($/)
    return "bad_token", [@token_index - current_token_value.size + 1, @token_index]
  # /foo/=bar - regex on the left of a comparison is invalid
  elsif current_token_value.match(/^\/.+?\/(<|>|=).+/)
    return "bad_token", [@token_index - current_token_value.size + 1, @token_index]
  # foo/=bar - stray "/" before the operator is invalid
  elsif current_token_value.match(/^.+?\/(<|>|=).+/)
    return "bad_token", [@token_index - current_token_value.size + 1, @token_index]
  else
    if func
      # Function statements must look like name('args')[.method][op value],
      # with arguments quoted consistently in single or double quotes.
      if current_token_value.match(/^.+?\((\s*(')[^']*(')\s*(,\s*(')[^']*('))*)?\)(\.[a-zA-Z0-9_]+)?((!=|<=|>=|=|>|<).+)?$/) ||
        current_token_value.match(/^.+?\((\s*(")[^"]*(")\s*(,\s*(")[^"]*("))*)?\)(\.[a-zA-Z0-9_]+)?((!=|<=|>=|=|>|<).+)?$/)
        return "fstatement", current_token_value
      else
        return "bad_token", [@token_index - current_token_value.size + 1, @token_index]
      end
    else
      # An odd number of "/" characters means an unterminated regex.
      slash_err = false
      current_token_value.split('').each do |c|
        if c == '/'
          slash_err = !slash_err
        end
      end
      return "bad_token", [@token_index - current_token_value.size + 1, @token_index] if slash_err
      return "statement", current_token_value
    end
  end
end
Deals with the special Puppet class statement: skips spaces and reports whether a comparison operator follows.
# File lib/mcollective/matcher/scanner.rb, line 174
# Reports whether the characters following position j form a bare Puppet
# class statement: skips any run of spaces and returns false when the
# next character is a comparison operator (=, < or >), true otherwise
# (including at end of input).
def is_klass?(j)
  pos = j
  pos += 1 while (pos < @arguments.size) && (@arguments[pos] == ' ')

  (@arguments[pos] =~ /=|<|>/) ? false : true
end
Eat spaces while looking for the next comparison symbol
# File lib/mcollective/matcher/scanner.rb, line 187
# Eats spaces starting just past +index+, counting each examined position
# into @white_spaces, until a non-whitespace character (or end of input)
# is reached; bumps @seperation_counter and returns that position, or nil
# when the scan runs past the input entirely.
#
# The quirky details (the <= bound, counting the first examined position
# even when it is non-space) are relied on by gen_statement's index
# arithmetic and are preserved exactly.
def lookahead(index)
  pos = index + 1
  until pos > @arguments.size
    @white_spaces += 1
    if @arguments[pos] !~ /\s/
      @seperation_counter += 1
      return pos
    end
    pos += 1
  end
  nil
end
Disabled; run with --debug to generate this.
Generated with the Darkfish Rdoc Generator 1.1.6.