author     Markus Roberts <Markus@reality.com>    2010-07-09 18:05:04 -0700
committer  Markus Roberts <Markus@reality.com>    2010-07-09 18:05:04 -0700
commit     9ee56f2e67be973da49b1d3f21de1bf87de35e6f (patch)
tree       ddab8c01509f47664c52c8a6b165bb5a974f138f  /lib/puppet/parser/lexer.rb
parent     051bd98751d9d4bc97f93f66723d9b7a00c0cfb4 (diff)
Code smell: Inconsistent indentation and related formatting issues
* Replaced 163 occurrences of

      defined\? +([@a-zA-Z_.0-9?=]+)

  with

      defined?(\1)

  This makes detecting subsequent patterns easier.

  3 Examples:

  The code:  if ! defined? @parse_config
  becomes:   if ! defined?(@parse_config)

  The code:  return @option_parser if defined? @option_parser
  becomes:   return @option_parser if defined?(@option_parser)

  The code:  if defined? @local and @local
  becomes:   if defined?(@local) and @local

* Eliminate trailing spaces.

  Replaced 428 occurrences of ^(.*?) +$ with \1

  1 file was skipped: test/ral/providers/host/parsed.rb because 0

* Replace leading tabs with an appropriate number of spaces.

  Replaced 306 occurrences of ^(\t+)(.*) with

  Tabs are not consistently expanded in all environments.

* Don't arbitrarily wrap on sprintf (%) operator.

  Replaced 143 occurrences of (.*['"] *%) +(.*) with

  Splitting the line does nothing to aid clarity and hinders further refactorings.

  3 Examples:

  The code:  raise Puppet::Error, "Cannot create %s: basedir %s is a file" % [dir, File.join(path)]
  becomes:   raise Puppet::Error, "Cannot create %s: basedir %s is a file" % [dir, File.join(path)]

  The code:  Puppet.err "Will not start without authorization file %s" % Puppet[:authconfig]
  becomes:   Puppet.err "Will not start without authorization file %s" % Puppet[:authconfig]

  The code:  $stderr.puts "Could not find host for PID %s with status %s" % [pid, $?.exitstatus]
  becomes:   $stderr.puts "Could not find host for PID %s with status %s" % [pid, $?.exitstatus]

* Don't break short arrays/parameter lists in two.

  Replaced 228 occurrences of (.*) +(.*) with

  3 Examples:

  The code:  puts @format.wrap(type.provider(prov).doc, :indent => 4, :scrub => true)
  becomes:   puts @format.wrap(type.provider(prov).doc, :indent => 4, :scrub => true)

  The code:  assert(FileTest.exists?(daily), "Did not make daily graph for %s" % type)
  becomes:   assert(FileTest.exists?(daily), "Did not make daily graph for %s" % type)

  The code:  assert(prov.target_object(:first).read !~ /^notdisk/, "Did not remove thing from disk")
  becomes:   assert(prov.target_object(:first).read !~ /^notdisk/, "Did not remove thing from disk")

* If arguments must wrap, treat them all equally.

  Replaced 510 occurrences of lines ending in things like ...(foo, or ...(bar(1,3), with \1 \2

  3 Examples:

  The code:  midscope.to_hash(false),
  becomes:   assert_equal(

  The code:  botscope.to_hash(true),
  becomes:   # bottomscope, then checking that we see the right stuff.

  The code:  :path => link,
  becomes:

* Replaced 4516 occurrences of ^( *)(.*) with

  The present code base is supposed to use four-space indentation. In some places we
  failed to maintain that standard. These should be fixed regardless of the 2 vs. 4
  space question.

  15 Examples:

  The code:  def run_comp(cmd) puts cmd results = [] old_sync = $stdout.sync $stdout.sync = true line = [] begin open("| #{cmd}", "r") do |f| until f.eof? do c = f.getc
  becomes:   def run_comp(cmd) puts cmd results = [] old_sync = $stdout.sync $stdout.sync = true line = [] begin open("| #{cmd}", "r") do |f| until f.eof? do c = f.getc

  The code:  s.gsub!(/.{4}/n, '\\\\u\&') } string.force_encoding(Encoding::UTF_8) string rescue Iconv::Failure => e raise GeneratorError, "Caught #{e.class}: #{e}" end else def utf8_to_pson(string) # :nodoc: string = string.gsub(/["\\\x0-\x1f]/) { MAP[$&] } string.gsub!(/(
  becomes:   s.gsub!(/.{4}/n, '\\\\u\&') } string.force_encoding(Encoding::UTF_8) string rescue Iconv::Failure => e raise GeneratorError, "Caught #{e.class}: #{e}" end else def utf8_to_pson(string) # :nodoc: string = string.gsub(/["\\\x0-\x1f]/) { MAP[$&] } string.gsub!(/(

  The code:  end } rvalues: rvalue | rvalues comma rvalue { if val[0].instance_of?(AST::ASTArray) result = val[0].push(val[2]) else result = ast AST::ASTArray, :children => [val[0],val[2]] end }
  becomes:   end } rvalues: rvalue | rvalues comma rvalue { if val[0].instance_of?(AST::ASTArray) result = val[0].push(val[2]) else result = ast AST::ASTArray, :children => [val[0],val[2]] end }

  The code:  #passwdproc = proc { @password } keytext = @key.export( OpenSSL::Cipher::DES.new(:EDE3, :CBC), @password ) File.open(@keyfile, "w", 0400) { |f| f << keytext }
  becomes:   # passwdproc = proc { @password } keytext = @key.export( OpenSSL::Cipher::DES.new(:EDE3, :CBC), @password ) File.open(@keyfile, "w", 0400) { |f| f << keytext }

  The code:  end def to_manifest "%s { '%s':\n%s\n}" % [self.type.to_s, self.name, @params.collect { |p, v| if v.is_a? Array " #{p} => [\'#{v.join("','")}\']" else " #{p} => \'#{v}\'" end }.join(",\n")
  becomes:   end def to_manifest "%s { '%s':\n%s\n}" % [self.type.to_s, self.name, @params.collect { |p, v| if v.is_a? Array " #{p} => [\'#{v.join("','")}\']" else " #{p} => \'#{v}\'" end }.join(",\n")

  The code:  via the augeas tool. Requires: - augeas to be installed (http://www.augeas.net) - ruby-augeas bindings Sample usage with a string:: augeas{\"test1\" : context => \"/files/etc/sysconfig/firstboot\", changes => \"set RUN_FIRSTBOOT YES\",
  becomes:   via the augeas tool. Requires: - augeas to be installed (http://www.augeas.net) - ruby-augeas bindings Sample usage with a string:: augeas{\"test1\" : context => \"/files/etc/sysconfig/firstboot\", changes => \"set RUN_FIRSTBOOT YES\",

  The code:  names.should_not be_include("root") end describe "when generating a purgeable resource" do it "should be included in the generated resources" do Puppet::Type.type(:host).stubs(:instances).returns [@purgeable_resource] @resources.generate.collect { |r| r.ref }.should include(@purgeable_resource.ref) end end describe "when the instance's do not have an ensure property" do
  becomes:   names.should_not be_include("root") end describe "when generating a purgeable resource" do it "should be included in the generated resources" do Puppet::Type.type(:host).stubs(:instances).returns [@purgeable_resource] @resources.generate.collect { |r| r.ref }.should include(@purgeable_resource.ref) end end describe "when the instance's do not have an ensure property" do

  The code:  describe "when the instance's do not have an ensure property" do it "should not be included in the generated resources" do @no_ensure_resource = Puppet::Type.type(:exec).new(:name => '/usr/bin/env echo') Puppet::Type.type(:host).stubs(:instances).returns [@no_ensure_resource] @resources.generate.collect { |r| r.ref }.should_not include(@no_ensure_resource.ref) end end describe "when the instance's ensure property does not accept absent" do it "should not be included in the generated resources" do @no_absent_resource = Puppet::Type.type(:service).new(:name => 'foobar')
  becomes:   describe "when the instance's do not have an ensure property" do it "should not be included in the generated resources" do @no_ensure_resource = Puppet::Type.type(:exec).new(:name => '/usr/bin/env echo') Puppet::Type.type(:host).stubs(:instances).returns [@no_ensure_resource] @resources.generate.collect { |r| r.ref }.should_not include(@no_ensure_resource.ref) end end describe "when the instance's ensure property does not accept absent" do it "should not be included in the generated resources" do @no_absent_resource = Puppet::Type.type(:service).new(:name => 'foobar')

  The code:  func = nil assert_nothing_raised do func = Puppet::Parser::AST::Function.new( :name => "template", :ftype => :rvalue, :arguments => AST::ASTArray.new( :children => [stringobj(template)] )
  becomes:   func = nil assert_nothing_raised do func = Puppet::Parser::AST::Function.new( :name => "template", :ftype => :rvalue, :arguments => AST::ASTArray.new( :children => [stringobj(template)] )

  The code:  assert( @store.allowed?("hostname.madstop.com", "192.168.1.50"), "hostname not allowed") assert( ! @store.allowed?("name.sub.madstop.com", "192.168.0.50"), "subname name allowed")
  becomes:   assert( @store.allowed?("hostname.madstop.com", "192.168.1.50"), "hostname not allowed") assert( ! @store.allowed?("name.sub.madstop.com", "192.168.0.50"), "subname name allowed")

  The code:  assert_nothing_raised { server = Puppet::Network::Handler.fileserver.new( :Local => true, :Config => false ) }
  becomes:   assert_nothing_raised { server = Puppet::Network::Handler.fileserver.new( :Local => true, :Config => false ) }

  The code:  'yay', { :failonfail => false, :uid => @user.uid, :gid => @user.gid } ).returns('output') output = Puppet::Util::SUIDManager.run_and_capture 'yay', @user.uid, @user.gid
  becomes:   'yay', { :failonfail => false, :uid => @user.uid, :gid => @user.gid } ).returns('output') output = Puppet::Util::SUIDManager.run_and_capture 'yay', @user.uid, @user.gid

  The code:  ).times(1) pkg.provider.expects( :aptget ).with( '-y', '-q', 'remove', 'faff'
  becomes:   ).times(1) pkg.provider.expects( :aptget ).with( '-y', '-q', 'remove', 'faff'

  The code:  johnny one two billy three four\n" # Just parse and generate, to make sure it's isomorphic. assert_nothing_raised do assert_equal(text, @parser.to_file(@parser.parse(text)), "parsing was not isomorphic") end end def test_valid_attrs
  becomes:   johnny one two billy three four\n" # Just parse and generate, to make sure it's isomorphic. assert_nothing_raised do assert_equal(text, @parser.to_file(@parser.parse(text)), "parsing was not isomorphic") end end def test_valid_attrs

  The code:  "testing", :onboolean => [true, "An on bool"], :string => ["a string", "A string arg"] ) result = [] should = [] assert_nothing_raised("Add args failed") do @config.addargs(result) end @config.each do |name, element|
  becomes:   "testing", :onboolean => [true, "An on bool"], :string => ["a string", "A string arg"] ) result = [] should = [] assert_nothing_raised("Add args failed") do @config.addargs(result) end @config.each do |name, element|
Diffstat (limited to 'lib/puppet/parser/lexer.rb')
-rw-r--r--  lib/puppet/parser/lexer.rb  158
1 file changed, 82 insertions, 76 deletions
diff --git a/lib/puppet/parser/lexer.rb b/lib/puppet/parser/lexer.rb
index 3ac16b56a..5d1ce8bc7 100644
--- a/lib/puppet/parser/lexer.rb
+++ b/lib/puppet/parser/lexer.rb
@@ -18,7 +18,7 @@ class Puppet::Parser::Lexer
def lex_error msg
raise Puppet::LexError.new(msg)
end
-
+
class Token
attr_accessor :regex, :name, :string, :skip, :incr_line, :skip_text, :accumulate
@@ -45,10 +45,10 @@ class Puppet::Parser::Lexer
@name.to_s
end
end
-
+
def acceptable?(context={})
# By default tokens are acceptable in any context
- true
+ true
end
end
@@ -108,55 +108,58 @@ class Puppet::Parser::Lexer
end
TOKENS = TokenList.new
- TOKENS.add_tokens(
- '[' => :LBRACK,
- ']' => :RBRACK,
- '{' => :LBRACE,
- '}' => :RBRACE,
- '(' => :LPAREN,
- ')' => :RPAREN,
- '=' => :EQUALS,
- '+=' => :APPENDS,
- '==' => :ISEQUAL,
- '>=' => :GREATEREQUAL,
- '>' => :GREATERTHAN,
- '<' => :LESSTHAN,
- '<=' => :LESSEQUAL,
- '!=' => :NOTEQUAL,
- '!' => :NOT,
- ',' => :COMMA,
- '.' => :DOT,
- ':' => :COLON,
- '@' => :AT,
- '<<|' => :LLCOLLECT,
- '->' => :IN_EDGE,
- '<-' => :OUT_EDGE,
- '~>' => :IN_EDGE_SUB,
- '<~' => :OUT_EDGE_SUB,
- '|>>' => :RRCOLLECT,
- '<|' => :LCOLLECT,
- '|>' => :RCOLLECT,
- ';' => :SEMIC,
- '?' => :QMARK,
- '\\' => :BACKSLASH,
- '=>' => :FARROW,
- '+>' => :PARROW,
- '+' => :PLUS,
- '-' => :MINUS,
- '/' => :DIV,
- '*' => :TIMES,
- '<<' => :LSHIFT,
- '>>' => :RSHIFT,
- '=~' => :MATCH,
- '!~' => :NOMATCH,
- %r{([a-z][-\w]*)?(::[a-z][-\w]*)+} => :CLASSNAME, # Require '::' in the class name, else we'd compete with NAME
- %r{((::){0,1}[A-Z][-\w]*)+} => :CLASSREF,
- "<string>" => :STRING,
- "<dqstring up to first interpolation>" => :DQPRE,
- "<dqstring between two interpolations>" => :DQMID,
- "<dqstring after final interpolation>" => :DQPOST,
- "<boolean>" => :BOOLEAN
- )
+
+ TOKENS.add_tokens(
+
+ '[' => :LBRACK,
+ ']' => :RBRACK,
+ '{' => :LBRACE,
+ '}' => :RBRACE,
+ '(' => :LPAREN,
+
+ ')' => :RPAREN,
+ '=' => :EQUALS,
+ '+=' => :APPENDS,
+ '==' => :ISEQUAL,
+ '>=' => :GREATEREQUAL,
+ '>' => :GREATERTHAN,
+ '<' => :LESSTHAN,
+ '<=' => :LESSEQUAL,
+ '!=' => :NOTEQUAL,
+ '!' => :NOT,
+ ',' => :COMMA,
+ '.' => :DOT,
+ ':' => :COLON,
+ '@' => :AT,
+ '<<|' => :LLCOLLECT,
+ '->' => :IN_EDGE,
+ '<-' => :OUT_EDGE,
+ '~>' => :IN_EDGE_SUB,
+ '<~' => :OUT_EDGE_SUB,
+ '|>>' => :RRCOLLECT,
+ '<|' => :LCOLLECT,
+ '|>' => :RCOLLECT,
+ ';' => :SEMIC,
+ '?' => :QMARK,
+ '\\' => :BACKSLASH,
+ '=>' => :FARROW,
+ '+>' => :PARROW,
+ '+' => :PLUS,
+ '-' => :MINUS,
+ '/' => :DIV,
+ '*' => :TIMES,
+ '<<' => :LSHIFT,
+ '>>' => :RSHIFT,
+ '=~' => :MATCH,
+ '!~' => :NOMATCH,
+ %r{([a-z][-\w]*)?(::[a-z][-\w]*)+} => :CLASSNAME, # Require '::' in the class name, else we'd compete with NAME
+ %r{((::){0,1}[A-Z][-\w]*)+} => :CLASSREF,
+ "<string>" => :STRING,
+ "<dqstring up to first interpolation>" => :DQPRE,
+ "<dqstring between two interpolations>" => :DQMID,
+ "<dqstring after final interpolation>" => :DQPOST,
+ "<boolean>" => :BOOLEAN
+ )
TOKENS.add_token :NUMBER, %r{\b(?:0[xX][0-9A-Fa-f]+|0?\d+(?:\.\d+)?(?:[eE]-?\d+)?)\b} do |lexer, value|
[TOKENS[:NAME], value]
@@ -224,8 +227,8 @@ class Puppet::Parser::Lexer
DQ_initial_token_types = {'$' => :DQPRE,'"' => :STRING}
DQ_continuation_token_types = {'$' => :DQMID,'"' => :DQPOST}
- TOKENS.add_token :DQUOTE, /"/ do |lexer, value|
- lexer.tokenize_interpolated_string(DQ_initial_token_types)
+ TOKENS.add_token :DQUOTE, /"/ do |lexer, value|
+ lexer.tokenize_interpolated_string(DQ_initial_token_types)
end
TOKENS.add_token :DQCONT, /\}/ do |lexer, value|
@@ -261,23 +264,26 @@ class Puppet::Parser::Lexer
KEYWORDS = TokenList.new
- KEYWORDS.add_tokens(
- "case" => :CASE,
- "class" => :CLASS,
- "default" => :DEFAULT,
- "define" => :DEFINE,
- "import" => :IMPORT,
- "if" => :IF,
- "elsif" => :ELSIF,
- "else" => :ELSE,
- "inherits" => :INHERITS,
- "node" => :NODE,
- "and" => :AND,
- "or" => :OR,
- "undef" => :UNDEF,
- "false" => :FALSE,
- "true" => :TRUE,
- "in" => :IN
+
+ KEYWORDS.add_tokens(
+
+ "case" => :CASE,
+ "class" => :CLASS,
+ "default" => :DEFAULT,
+ "define" => :DEFINE,
+ "import" => :IMPORT,
+ "if" => :IF,
+ "elsif" => :ELSIF,
+ "else" => :ELSE,
+ "inherits" => :INHERITS,
+ "node" => :NODE,
+ "and" => :AND,
+ "or" => :OR,
+ "undef" => :UNDEF,
+ "false" => :FALSE,
+ "true" => :TRUE,
+
+ "in" => :IN
)
def clear
@@ -318,7 +324,7 @@ class Puppet::Parser::Lexer
# until we either match or run out of chars. This way our worst-case is three
# tries, where it is otherwise the number of string token we have. Also,
# the lookups are optimized hash lookups, instead of regex scans.
- #
+ #
s = @scanner.peek(3)
token = TOKENS.lookup(s[0,3]) || TOKENS.lookup(s[0,2]) || TOKENS.lookup(s[0,1])
[ token, token && @scanner.scan(token.regex) ]
@@ -352,7 +358,7 @@ class Puppet::Parser::Lexer
end
def indefine?
- if defined? @indefine
+ if defined?(@indefine)
@indefine
else
false
@@ -380,8 +386,8 @@ class Puppet::Parser::Lexer
@expected = []
@commentstack = [ ['', @line] ]
@lexing_context = {
- :after => nil,
- :start_of_line => true,
+ :after => nil,
+ :start_of_line => true,
:string_interpolation_depth => 0
}
end
@@ -527,7 +533,7 @@ class Puppet::Parser::Lexer
ch
else
Puppet.warning "Unrecognised escape sequence '\\#{ch}'#{file && " in file #{file}"}#{line && " at line #{line}"}"
- "\\#{ch}"
+ "\\#{ch}"
end
end
}
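
[Editor's note: the comment touched by the @@ -318,7 +324,7 @@ hunk above describes how the
lexer finds punctuation tokens: peek at most three characters and try hash lookups from the
longest prefix down, so the worst case is three lookups rather than a regex scan over every
string token. A minimal standalone sketch of that idea follows; the TOKEN_TABLE contents and
the method name are illustrative, not Puppet's actual internals.]

    require 'strscan'

    # Illustrative sketch of longest-prefix-first token lookup; hypothetical table and
    # method name, not Puppet's API. Token names match those defined in the diff above.
    TOKEN_TABLE = {
      '<<|' => :LLCOLLECT, '|>>' => :RRCOLLECT,   # 3-character operators
      '=>'  => :FARROW,    '<<'  => :LSHIFT,      # 2-character operators
      '='   => :EQUALS,    '<'   => :LESSTHAN     # 1-character operators
    }

    def next_punctuation_token(scanner)
      s = scanner.peek(3)
      # Try the longest prefix first so '<<|' wins over '<<', which wins over '<'.
      [3, 2, 1].each do |len|
        name = TOKEN_TABLE[s[0, len]]
        if name
          scanner.pos += len                      # consume the matched characters
          return [name, s[0, len]]
        end
      end
      nil
    end

    scanner = StringScanner.new('<<| File')
    p next_punctuation_token(scanner)             # => [:LLCOLLECT, "<<|"]
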