diff --git a/lib/puppet/defaults.rb b/lib/puppet/defaults.rb index 0c54c1afa..eb4a070e5 100644 --- a/lib/puppet/defaults.rb +++ b/lib/puppet/defaults.rb @@ -1,955 +1,990 @@ # The majority of Puppet's configuration settings are set in this file. module Puppet setdefaults(:main, :confdir => [Puppet.run_mode.conf_dir, "The main Puppet configuration directory. The default for this setting is calculated based on the user. If the process is running as root or the user that Puppet is supposed to run as, it defaults to a system directory, but if it's running as any other user, it defaults to being in the user's home directory."], :vardir => [Puppet.run_mode.var_dir, "Where Puppet stores dynamic and growing data. The default for this setting is calculated specially, like `confdir`_."], :name => [Puppet.application_name.to_s, "The name of the application, if we are running as one. The default is essentially $0 without the path or `.rb`."], :run_mode => [Puppet.run_mode.name.to_s, "The effective 'run mode' of the application: master, agent, or user."] ) setdefaults(:main, :logdir => Puppet.run_mode.logopts) setdefaults(:main, :trace => [false, "Whether to print stack traces on some errors"], :autoflush => { :default => true, :desc => "Whether log files should always flush to disk.", :hook => proc { |value| Log.autoflush = value } }, :syslogfacility => ["daemon", "What syslog facility to use when logging to syslog. Syslog has a fixed list of valid facilities, and you must choose one of those; you cannot just make one up."], :statedir => { :default => "$vardir/state", :mode => 01755, :desc => "The directory where Puppet state is stored. Generally, this directory can be removed without causing harm (although it might result in spurious service restarts)." }, :rundir => { :default => Puppet.run_mode.run_dir, :mode => 01777, :desc => "Where Puppet PID files are kept." }, :genconfig => [false, "Whether to just print a configuration to stdout and exit. Only makes sense when used interactively. Takes into account arguments specified on the CLI."], :genmanifest => [false, "Whether to just print a manifest to stdout and exit. Only makes sense when used interactively. Takes into account arguments specified on the CLI."], :configprint => ["", "Print the value of a specific configuration setting. If the name of a setting is provided for this, then the value is printed and puppet exits. Comma-separate multiple values. For a list of all values, specify 'all'."], :color => { :default => "ansi", :type => :setting, :desc => "Whether to use colors when logging to the console. Valid values are `ansi` (equivalent to `true`), `html`, and `false`, which produces no color.", }, :mkusers => [false, "Whether to create the necessary user and group that puppet agent will run as."], :manage_internal_file_permissions => [true, "Whether Puppet should manage the owner, group, and mode of files it uses internally" ], :onetime => {:default => false, :desc => "Run the configuration once, rather than as a long-running daemon. This is useful for interactively running puppetd.", :short => 'o' }, :path => {:default => "none", :desc => "The shell search path. Defaults to whatever is inherited from the parent process.", :call_on_define => true, # Call our hook with the default value, so we always get the libdir set. :hook => proc do |value| ENV["PATH"] = "" if ENV["PATH"].nil? 
ENV["PATH"] = value unless value == "none" paths = ENV["PATH"].split(File::PATH_SEPARATOR) Puppet::Util::Platform.default_paths.each do |path| ENV["PATH"] += File::PATH_SEPARATOR + path unless paths.include?(path) end value end }, :libdir => {:default => "$vardir/lib", :desc => "An extra search path for Puppet. This is only useful for those files that Puppet will load on demand, and is only guaranteed to work for those cases. In fact, the autoload mechanism is responsible for making sure this directory is in Ruby's search path", :call_on_define => true, # Call our hook with the default value, so we always get the libdir set. :hook => proc do |value| $LOAD_PATH.delete(@oldlibdir) if defined?(@oldlibdir) and $LOAD_PATH.include?(@oldlibdir) @oldlibdir = value $LOAD_PATH << value end }, :ignoreimport => [false, "If true, allows the parser to continue without requiring all files referenced with `import` statements to exist. This setting was primarily designed for use with commit hooks for parse-checking."], :authconfig => [ "$confdir/namespaceauth.conf", "The configuration file that defines the rights to the different namespaces and methods. This can be used as a coarse-grained authorization system for both `puppet agent` and `puppet master`." ], :environment => {:default => "production", :desc => "The environment Puppet is running in. For clients (e.g., `puppet agent`) this determines the environment itself, which is used to find modules and much more. For servers (i.e., `puppet master`) this provides the default environment for nodes we know nothing about." }, :diff_args => ["-u", "Which arguments to pass to the diff command when printing differences between files. The command to use can be chosen with the `diff` setting."], :diff => { :default => (Puppet.features.microsoft_windows? ? "" : "diff"), :desc => "Which diff command to use when printing differences between files. This setting has no default value on Windows, as standard `diff` is not available, but Puppet can use many third-party diff tools.", }, :show_diff => [false, "Whether to log and report a contextual diff when files are being replaced. This causes partial file contents to pass through Puppet's normal logging and reporting system, so this setting should be used with caution if you are sending Puppet's reports to an insecure destination. This feature currently requires the `diff/lcs` Ruby library."], :daemonize => { :default => (Puppet.features.microsoft_windows? ? false : true), :desc => "Whether to send the process into the background. This defaults to true on POSIX systems, and to false on Windows (where Puppet currently cannot daemonize).", :short => "D", :hook => proc do |value| if value and Puppet.features.microsoft_windows? raise "Cannot daemonize on Windows" end end }, :maximum_uid => [4294967290, "The maximum allowed UID. Some platforms use negative UIDs but then ship with tools that do not know how to handle signed ints, so the UIDs show up as huge numbers that can then not be fed back into the system. This is a hackish way to fail in a slightly more useful way when that happens."], :route_file => ["$confdir/routes.yaml", "The YAML file containing indirector route configuration."], :node_terminus => ["plain", "Where to find information about nodes."], :catalog_terminus => ["compiler", "Where to get node catalogs. 
This is useful to change if, for instance, you'd like to pre-compile catalogs and store them in memcached or some other easily-accessed store."], :facts_terminus => { :default => Puppet.application_name.to_s == "master" ? 'yaml' : 'facter', :desc => "The node facts terminus.", :hook => proc do |value| require 'puppet/node/facts' # Cache to YAML if we're uploading facts away if %w[rest inventory_service].include? value.to_s Puppet::Node::Facts.indirection.cache_class = :yaml end end }, :inventory_terminus => [ "$facts_terminus", "Should usually be the same as the facts terminus" ], :httplog => { :default => "$logdir/http.log", :owner => "root", :mode => 0640, :desc => "Where the puppet agent web server logs." }, :http_proxy_host => ["none", "The HTTP proxy host to use for outgoing connections. Note: You may need to use a FQDN for the server hostname when using a proxy."], :http_proxy_port => [3128, "The HTTP proxy port to use for outgoing connections"], :filetimeout => [ 15, "The minimum time to wait (in seconds) between checking for updates in configuration files. This timeout determines how quickly Puppet checks whether a file (such as manifests or templates) has changed on disk." ], :queue_type => ["stomp", "Which type of queue to use for asynchronous processing."], :queue_source => ["stomp://localhost:61613/", "The URI of the queue to use for asynchronous processing. If your stomp server requires authentication, you can include it in the URI as long as your stomp client library is at least 1.1.1"], :async_storeconfigs => {:default => false, :desc => "Whether to use a queueing system to provide asynchronous database integration. Requires that `puppetqd` be running and that 'PSON' support for ruby be installed.", :hook => proc do |value| if value # This reconfigures the termini for Node, Facts, and Catalog Puppet.settings[:storeconfigs] = true # But then we modify the configuration Puppet::Resource::Catalog.indirection.cache_class = :queue else raise "Cannot disable asynchronous storeconfigs in a running process" end end }, :thin_storeconfigs => {:default => false, :desc => "Boolean; whether Puppet should store only facts and exported resources in the storeconfigs database. This will improve the performance of exported resources with the older `active_record` backend, but will disable external tools that search the storeconfigs database. Thinning catalogs is generally unnecessary when using PuppetDB to store catalogs.", :hook => proc do |value| Puppet.settings[:storeconfigs] = true if value end }, :config_version => ["", "How to determine the configuration version. By default, it will be the time that the configuration is parsed, but you can provide a shell script to override how the version is determined. The output of this script will be added to every log message in the reports, allowing you to correlate changes on your hosts to the source version on the server."], :zlib => [true, "Boolean; whether to use the zlib library", ], :prerun_command => ["", "A command to run before every agent run. If this command returns a non-zero return code, the entire Puppet run will fail."], :postrun_command => ["", "A command to run after every agent run. If this command returns a non-zero return code, the entire Puppet run will be considered to have failed, even though it might have performed work during the normal run."], :freeze_main => [false, "Freezes the 'main' class, disallowing any code to be added to it.
This essentially means that you can't have any code outside of a node, class, or definition other than in the site manifest."] ) Puppet.setdefaults(:module_tool, :module_repository => ['http://forge.puppetlabs.com', "The module repository"], :module_working_dir => ['$vardir/puppet-module', "The directory into which module tool data is stored"] ) hostname = Facter["hostname"].value domain = Facter["domain"].value if domain and domain != "" fqdn = [hostname, domain].join(".") else fqdn = hostname end Puppet.setdefaults( :main, # We have to downcase the fqdn, because the current ssl stuff (as oppsed to in master) doesn't have good facilities for # manipulating naming. :certname => {:default => fqdn.downcase, :desc => "The name to use when handling certificates. Defaults to the fully qualified domain name.", :call_on_define => true, # Call our hook with the default value, so we're always downcased :hook => proc { |value| raise(ArgumentError, "Certificate names must be lower case; see #1168") unless value == value.downcase }}, :certdnsnames => { :default => '', :hook => proc do |value| unless value.nil? or value == '' then Puppet.warning < < { :default => '', :desc => < { :default => "$ssldir/certs", :owner => "service", :desc => "The certificate directory." }, :ssldir => { :default => "$confdir/ssl", :mode => 0771, :owner => "service", :desc => "Where SSL certificates are kept." }, :publickeydir => { :default => "$ssldir/public_keys", :owner => "service", :desc => "The public key directory." }, :requestdir => { :default => "$ssldir/certificate_requests", :owner => "service", :desc => "Where host certificate requests are stored." }, :privatekeydir => { :default => "$ssldir/private_keys", :mode => 0750, :owner => "service", :desc => "The private key directory." }, :privatedir => { :default => "$ssldir/private", :mode => 0750, :owner => "service", :desc => "Where the client stores private certificate information." }, :passfile => { :default => "$privatedir/password", :mode => 0640, :owner => "service", :desc => "Where puppet agent stores the password for its private key. Generally unused." }, :hostcsr => { :default => "$ssldir/csr_$certname.pem", :mode => 0644, :owner => "service", :desc => "Where individual hosts store and look for their certificate requests." }, :hostcert => { :default => "$certdir/$certname.pem", :mode => 0644, :owner => "service", :desc => "Where individual hosts store and look for their certificates." }, :hostprivkey => { :default => "$privatekeydir/$certname.pem", :mode => 0600, :owner => "service", :desc => "Where individual hosts store and look for their private key." }, :hostpubkey => { :default => "$publickeydir/$certname.pem", :mode => 0644, :owner => "service", :desc => "Where individual hosts store and look for their public key." }, :localcacert => { :default => "$certdir/ca.pem", :mode => 0644, :owner => "service", :desc => "Where each client stores the CA certificate." }, :hostcrl => { :default => "$ssldir/crl.pem", :mode => 0644, :owner => "service", :desc => "Where the host's certificate revocation list can be found. This is distinct from the certificate authority's CRL." }, :certificate_revocation => [true, "Whether certificate revocation should be supported by downloading a Certificate Revocation List (CRL) to all clients. 
If enabled, CA chaining will almost definitely not work."] ) setdefaults( :ca, :ca_name => ["Puppet CA: $certname", "The name to use for the Certificate Authority certificate."], :cadir => { :default => "$ssldir/ca", :owner => "service", :group => "service", :mode => 0770, :desc => "The root directory for the certificate authority." }, :cacert => { :default => "$cadir/ca_crt.pem", :owner => "service", :group => "service", :mode => 0660, :desc => "The CA certificate." }, :cakey => { :default => "$cadir/ca_key.pem", :owner => "service", :group => "service", :mode => 0660, :desc => "The CA private key." }, :capub => { :default => "$cadir/ca_pub.pem", :owner => "service", :group => "service", :desc => "The CA public key." }, :cacrl => { :default => "$cadir/ca_crl.pem", :owner => "service", :group => "service", :mode => 0664, :desc => "The certificate revocation list (CRL) for the CA. Will be used if present but otherwise ignored.", :hook => proc do |value| if value == 'false' Puppet.warning "Setting the :cacrl to 'false' is deprecated; Puppet will just ignore the crl if yours is missing" end end }, :caprivatedir => { :default => "$cadir/private", :owner => "service", :group => "service", :mode => 0770, :desc => "Where the CA stores private certificate information." }, :csrdir => { :default => "$cadir/requests", :owner => "service", :group => "service", :desc => "Where the CA stores certificate requests" }, :signeddir => { :default => "$cadir/signed", :owner => "service", :group => "service", :mode => 0770, :desc => "Where the CA stores signed certificates." }, :capass => { :default => "$caprivatedir/ca.pass", :owner => "service", :group => "service", :mode => 0660, :desc => "Where the CA stores the password for the private key" }, :serial => { :default => "$cadir/serial", :owner => "service", :group => "service", :mode => 0644, :desc => "Where the serial number for certificates is stored." }, :autosign => { :default => "$confdir/autosign.conf", :mode => 0644, :desc => "Whether to enable autosign. Valid values are true (which autosigns any key request, and is a very bad idea), false (which never autosigns any key request), and the path to a file, which uses that configuration file to determine which keys to sign."}, :allow_duplicate_certs => [false, "Whether to allow a new certificate request to overwrite an existing certificate."], :ca_days => ["", "How long a certificate should be valid, in days. This setting is deprecated; use `ca_ttl` instead"], :ca_ttl => ["5y", "The default TTL for new certificates; valid values must be an integer, optionally followed by one of the units 'y' (years of 365 days), 'd' (days), 'h' (hours), or 's' (seconds). The unit defaults to seconds. If this setting is set, ca_days is ignored. Examples are '3600' (one hour) and '1825d', which is the same as '5y' (5 years) "], :ca_md => ["md5", "The type of hash used in certificates."], :req_bits => [4096, "The bit length of the certificates."], :keylength => [4096, "The bit length of keys."], :cert_inventory => { :default => "$cadir/inventory.txt", :mode => 0644, :owner => "service", :group => "service", :desc => "A complete listing of all certificates." } ) # Define the config default. setdefaults( Puppet.settings[:name], :config => ["$confdir/puppet.conf", "The configuration file for #{Puppet[:name]}."], :pidfile => ["$rundir/$name.pid", "The pid file"], :bindaddress => ["", "The address a listening server should bind to.
Mongrel servers default to 127.0.0.1 and WEBrick defaults to 0.0.0.0."], :servertype => {:default => "webrick", :desc => "The type of server to use. Currently supported options are webrick and mongrel. If you use mongrel, you will need a proxy in front of the process or processes, since Mongrel cannot speak SSL.", :call_on_define => true, # Call our hook with the default value, so we always get the correct bind address set. :hook => proc { |value| value == "webrick" ? Puppet.settings[:bindaddress] = "0.0.0.0" : Puppet.settings[:bindaddress] = "127.0.0.1" if Puppet.settings[:bindaddress] == "" } } ) setdefaults(:master, :user => ["puppet", "The user puppet master should run as."], :group => ["puppet", "The group puppet master should run as."], :manifestdir => ["$confdir/manifests", "Where puppet master looks for its manifests."], :manifest => ["$manifestdir/site.pp", "The entry-point manifest for puppet master."], :code => ["", "Code to parse directly. This is essentially only used by `puppet`, and should only be set if you're writing your own Puppet executable"], :masterlog => { :default => "$logdir/puppetmaster.log", :owner => "service", :group => "service", :mode => 0660, :desc => "Where puppet master logs. This is generally not used, since syslog is the default log destination." }, :masterhttplog => { :default => "$logdir/masterhttp.log", :owner => "service", :group => "service", :mode => 0660, :create => true, :desc => "Where the puppet master web server logs." }, :masterport => [8140, "Which port puppet master listens on."], :node_name => ["cert", "How the puppet master determines the client's identity and sets the 'hostname', 'fqdn' and 'domain' facts for use in the manifest, in particular for determining which 'node' statement applies to the client. Possible values are 'cert' (use the subject's CN in the client's certificate) and 'facter' (use the hostname that the client reported in its facts)"], :bucketdir => { :default => "$vardir/bucket", :mode => 0750, :owner => "service", :group => "service", :desc => "Where FileBucket files are stored." }, :rest_authconfig => [ "$confdir/auth.conf", "The configuration file that defines the rights to the different rest indirections. This can be used as a fine-grained authorization system for `puppet master`." ], :ca => [true, "Whether the master should function as a certificate authority."], :modulepath => { :default => "$confdir/modules#{File::PATH_SEPARATOR}/usr/share/puppet/modules", :desc => "The search path for modules, as a list of directories separated by the system path separator character. (The POSIX path separator is ':', and the Windows path separator is ';'.)", :type => :setting # We don't want this to be considered a file, since it's multiple files. }, :ssl_client_header => ["HTTP_X_CLIENT_DN", "The header containing an authenticated client's SSL DN. Only used with Mongrel. This header must be set by the proxy to the authenticated client's SSL DN (e.g., `/CN=puppet.puppetlabs.com`). See http://projects.puppetlabs.com/projects/puppet/wiki/Using_Mongrel for more information."], :ssl_client_verify_header => ["HTTP_X_CLIENT_VERIFY", "The header containing the status message of the client verification. Only used with Mongrel. This header must be set by the proxy to 'SUCCESS' if the client successfully authenticated, and anything else otherwise. 
See http://projects.puppetlabs.com/projects/puppet/wiki/Using_Mongrel for more information."], # To make sure this directory is created before we try to use it on the server, we need # it to be in the server section (#1138). :yamldir => {:default => "$vardir/yaml", :owner => "service", :group => "service", :mode => "750", :desc => "The directory in which YAML data is stored, usually in a subdirectory."}, :server_datadir => {:default => "$vardir/server_data", :owner => "service", :group => "service", :mode => "750", :desc => "The directory in which serialized data is stored, usually in a subdirectory."}, :reports => ["store", "The list of reports to generate. All reports are looked for in `puppet/reports/name.rb`, and multiple report names should be comma-separated (whitespace is okay)." ], :reportdir => {:default => "$vardir/reports", :mode => 0750, :owner => "service", :group => "service", :desc => "The directory in which to store reports received from the client. Each client gets a separate subdirectory."}, :reporturl => ["http://localhost:3000/reports/upload", "The URL used by the http reports processor to send reports"], :fileserverconfig => ["$confdir/fileserver.conf", "Where the fileserver configuration is stored."], :strict_hostname_checking => [false, "Whether to only search for the complete hostname as it is in the certificate when searching for node information in the catalogs."] ) setdefaults(:metrics, :rrddir => {:default => "$vardir/rrd", :mode => 0750, :owner => "service", :group => "service", :desc => "The directory where RRD database files are stored. Directories for each reporting host will be created under this directory." }, :rrdinterval => ["$runinterval", "How often RRD should expect data. This should match how often the hosts report back to the server."] ) setdefaults(:device, :devicedir => {:default => "$vardir/devices", :mode => "750", :desc => "The root directory of devices' $vardir"}, :deviceconfig => ["$confdir/device.conf","Path to the device config file for puppet device"] ) setdefaults(:agent, :node_name_value => { :default => "$certname", :desc => "The explicit value used for the node name for all requests the agent makes to the master. WARNING: This setting is mutually exclusive with node_name_fact. Changing this setting also requires changes to the default auth.conf configuration on the Puppet Master. Please see http://links.puppetlabs.com/node_name_value for more information." }, :node_name_fact => { :default => "", :desc => "The fact name used to determine the node name used for all requests the agent makes to the master. WARNING: This setting is mutually exclusive with node_name_value. Changing this setting also requires changes to the default auth.conf configuration on the Puppet Master. Please see http://links.puppetlabs.com/node_name_fact for more information.", :hook => proc do |value| if !value.empty? and Puppet[:node_name_value] != Puppet[:certname] raise "Cannot specify both the node_name_value and node_name_fact settings" end end }, :localconfig => { :default => "$statedir/localconfig", :owner => "root", :mode => 0660, :desc => "Where puppet agent caches the local configuration. An extension indicating the cache format is added automatically."}, :statefile => { :default => "$statedir/state.yaml", :mode => 0660, :desc => "Where puppet agent and puppet master store state associated with the running configuration. In the case of puppet master, this file reflects the state discovered through interacting with clients." 
}, :clientyamldir => {:default => "$vardir/client_yaml", :mode => "750", :desc => "The directory in which client-side YAML data is stored."}, :client_datadir => {:default => "$vardir/client_data", :mode => "750", :desc => "The directory in which serialized data is stored on the client."}, :classfile => { :default => "$statedir/classes.txt", :owner => "root", :mode => 0640, :desc => "The file in which puppet agent stores a list of the classes associated with the retrieved configuration. Can be loaded in the separate `puppet` executable using the `--loadclasses` option."}, :resourcefile => { :default => "$statedir/resources.txt", :owner => "root", :mode => 0640, :desc => "The file in which puppet agent stores a list of the resources associated with the retrieved configuration." }, :puppetdlog => { :default => "$logdir/puppetd.log", :owner => "root", :mode => 0640, :desc => "The log file for puppet agent. This is generally not used." }, :server => ["puppet", "The server to which puppet agent should connect."], :ignoreschedules => [false, "Boolean; whether puppet agent should ignore schedules. This is useful for initial puppet agent runs."], :puppetport => [8139, "Which port puppet agent listens on."], :noop => [false, "Whether puppet agent should be run in noop mode."], :runinterval => [1800, # 30 minutes "How often puppet agent applies the client configuration; in seconds. Note that a runinterval of 0 means \"run continuously\" rather than \"never run.\" If you want puppet agent to never run, you should start it with the `--no-client` option."], :listen => [false, "Whether puppet agent should listen for connections. If this is true, then puppet agent will accept incoming REST API requests, subject to the default ACLs and the ACLs set in the `rest_authconfig` file. Puppet agent can respond usefully to requests on the `run`, `facts`, `certificate`, and `resource` endpoints."], :ca_server => ["$server", "The server to use for certificate authority requests. It's a separate server because it cannot and does not need to horizontally scale."], :ca_port => ["$masterport", "The port to use for the certificate authority."], :catalog_format => { :default => "", :desc => "(Deprecated for 'preferred_serialization_format') What format to use to dump the catalog. Only supports 'marshal' and 'yaml'. Only matters on the client, since it asks the server for a specific format.", :hook => proc { |value| if value Puppet.warning "Setting 'catalog_format' is deprecated; use 'preferred_serialization_format' instead." Puppet.settings[:preferred_serialization_format] = value end } }, :preferred_serialization_format => ["pson", "The preferred means of serializing ruby instances for passing over the wire. This won't guarantee that all instances will be serialized using this method, since not all classes can be guaranteed to support this format, but it will be used for all classes that support it."], :puppetdlockfile => { :default => "$statedir/puppetdlock", :type => :setting, # (#2888) Ensure this file is not added to the settings catalog. :desc => "A lock file to temporarily stop puppet agent from doing anything.", }, :usecacheonfailure => [true, "Whether to use the cached configuration when the remote configuration will not compile. This option is useful for testing new configurations, where you want to fix the broken configuration rather than reverting to a known-good one." ], :use_cached_catalog => [false, "Whether to only use the cached catalog rather than compiling a new catalog on every run.
Puppet can be run with this enabled by default and then selectively disabled when a recompile is desired."], :ignorecache => [false, "Ignore cache and always recompile the configuration. This is useful for testing new configurations, where the local cache may in fact be stale even if the timestamps are up to date - if the facts change or if the server changes." ], :downcasefacts => [false, "Whether facts should be made all lowercase when sent to the server."], :dynamicfacts => ["memorysize,memoryfree,swapsize,swapfree", "Facts that are dynamic; these facts will be ignored when deciding whether changed facts should result in a recompile. Multiple facts should be comma-separated."], :splaylimit => ["$runinterval", "The maximum time to delay before runs. Defaults to being the same as the run interval."], :splay => [false, "Whether to sleep for a pseudo-random (but consistent) amount of time before a run."], :clientbucketdir => { :default => "$vardir/clientbucket", :mode => 0750, :desc => "Where FileBucket files are stored locally." }, :configtimeout => [120, "How long the client should wait for the configuration to be retrieved before considering it a failure. This can help reduce flapping if too many clients contact the server at one time." ], :reportserver => { :default => "$server", :call_on_define => false, :desc => "(Deprecated for 'report_server') The server to which to send transaction reports.", :hook => proc do |value| Puppet.settings[:report_server] = value if value end }, :report_server => ["$server", "The server to send transaction reports to." ], :report_port => ["$masterport", "The port to communicate with the report_server." ], :inventory_server => ["$server", "The server to send facts to." ], :inventory_port => ["$masterport", "The port to communicate with the inventory_server." ], :report => [true, "Whether to send reports after every transaction." ], :lastrunfile => { :default => "$statedir/last_run_summary.yaml", :mode => 0644, :desc => "Where puppet agent stores the last run report summary in yaml format." }, :lastrunreport => { :default => "$statedir/last_run_report.yaml", :mode => 0640, :desc => "Where puppet agent stores the last run report in yaml format." }, :graph => [false, "Whether to create dot graph files for the different configuration graphs. These dot files can be interpreted by tools like OmniGraffle or dot (which is part of Graphviz)."], :graphdir => ["$statedir/graphs", "Where to store dot-outputted graphs."], :http_compression => [false, "Allow http compression in REST communication with the master. This setting might improve performance for agent -> master communications over slow WANs. Your puppet master needs to support compression (usually by activating some settings in a reverse-proxy in front of the puppet master, which rules out webrick). It is harmless to activate this setting if your master doesn't support compression, but if it supports it, this setting might reduce performance on high-speed LANs."] ) setdefaults(:inspect, :archive_files => [false, "During an inspect run, whether to archive files whose contents are audited to a file bucket."], :archive_file_server => ["$server", "During an inspect run, the file bucket server to archive files to if archive_files is set."] ) # Plugin information. setdefaults( :main, :plugindest => ["$libdir", "Where Puppet should store plugins that it pulls down from the central server."], :pluginsource => ["puppet://$server/plugins", "From where to retrieve plugins.
The standard Puppet `file` type is used for retrieval, so anything that is a valid file source can be used here."], :pluginsync => [false, "Whether plugins should be synced with the central server."], :pluginsignore => [".svn CVS .git", "What files to ignore when pulling down plugins."] ) # Central fact information. setdefaults( :main, :factpath => {:default => "$vardir/lib/facter#{File::PATH_SEPARATOR}$vardir/facts", :desc => "Where Puppet should look for facts. Multiple directories should be separated by the system path separator character. (The POSIX path separator is ':', and the Windows path separator is ';'.)", :call_on_define => true, # Call our hook with the default value, so we always get the value added to facter. :type => :setting, # Don't consider it a file, because it could be multiple colon-separated files :hook => proc { |value| Facter.search(value) if Facter.respond_to?(:search) }}, :factdest => ["$vardir/facts/", "Where Puppet should store facts that it pulls down from the central server."], :factsource => ["puppet://$server/facts/", "From where to retrieve facts. The standard Puppet `file` type is used for retrieval, so anything that is a valid file source can be used here."], :factsync => [false, "Whether facts should be synced with the central server."], :factsignore => [".svn CVS", "What files to ignore when pulling down facts."] ) setdefaults( :tagmail, :tagmap => ["$confdir/tagmail.conf", "The mapping between reporting tags and email addresses."], :sendmail => [which('sendmail') || '', "Where to find the sendmail binary with which to send email."], :reportfrom => ["report@" + [Facter["hostname"].value, Facter["domain"].value].join("."), "The 'from' email address for the reports."], :smtpserver => ["none", "The server through which to send email reports."] ) setdefaults( :rails, :dblocation => { :default => "$statedir/clientconfigs.sqlite3", :mode => 0660, :owner => "service", :group => "service", :desc => "The database cache for client configurations. Used for querying within the language." }, :dbadapter => [ "sqlite3", "The type of database to use." ], :dbmigrate => [ false, "Whether to automatically migrate the database." ], :dbname => [ "puppet", "The name of the database to use." ], :dbserver => [ "localhost", "The database server for caching. Only used when networked databases are used."], :dbport => [ "", "The database port for caching. Only used when networked databases are used."], :dbuser => [ "puppet", "The database user for caching. Only used when networked databases are used."], :dbpassword => [ "puppet", "The database password for caching. Only used when networked databases are used."], :dbconnections => [ '', "The number of database connections for networked databases. Will be ignored unless the value is a positive integer."], :dbsocket => [ "", "The database socket location. Only used when networked databases are used. Will be ignored if the value is an empty string."], :railslog => {:default => "$logdir/rails.log", :mode => 0600, :owner => "service", :group => "service", :desc => "Where Rails-specific logs are sent" }, :rails_loglevel => ["info", "The log level for Rails connections. The value must be a valid log level within Rails. Production environments normally use `info` and other environments normally use `debug`."] ) setdefaults( :couchdb, :couchdb_url => ["http://127.0.0.1:5984/puppet", "The url where the puppet couchdb database will be created"] ) setdefaults( :transaction, :tags => ["", "Tags to use to find resources.
If this is set, then only resources tagged with the specified tags will be applied. Values must be comma-separated."], :evaltrace => [false, "Whether each resource should log when it is being evaluated. This allows you to interactively see exactly what is being done."], :summarize => [false, "Whether to print a transaction summary." ] ) setdefaults( :main, :external_nodes => ["none", "An external command that can produce node information. The command's output must be a YAML dump of a hash, and that hash must have a `classes` key and/or a `parameters` key, where `classes` is an array or hash and `parameters` is a hash. For unknown nodes, the command should exit with a non-zero exit code. This command makes it straightforward to store your node mapping information in other data sources like databases."]) setdefaults( :ldap, :ldapnodes => [false, "Whether to search for node configurations in LDAP. See http://projects.puppetlabs.com/projects/puppet/wiki/LDAP_Nodes for more information."], :ldapssl => [false, "Whether SSL should be used when searching for nodes. Defaults to false because SSL usually requires certificates to be set up on the client side."], :ldaptls => [false, "Whether TLS should be used when searching for nodes. Defaults to false because TLS usually requires certificates to be set up on the client side."], :ldapserver => ["ldap", "The LDAP server. Only used if `ldapnodes` is enabled."], :ldapport => [389, "The LDAP port. Only used if `ldapnodes` is enabled."], :ldapstring => ["(&(objectclass=puppetClient)(cn=%s))", "The search string used to find an LDAP node."], :ldapclassattrs => ["puppetclass", "The LDAP attributes to use to define Puppet classes. Values should be comma-separated."], :ldapstackedattrs => ["puppetvar", "The LDAP attributes that should be stacked to arrays by adding the values in all hierarchy elements of the tree. Values should be comma-separated."], :ldapattrs => ["all", "The LDAP attributes to include when querying LDAP for nodes. All returned attributes are set as variables in the top-level scope. Multiple values should be comma-separated. The value 'all' returns all attributes."], :ldapparentattr => ["parentnode", "The attribute to use to define the parent node."], :ldapuser => ["", "The user to use to connect to LDAP. Must be specified as a full DN."], :ldappassword => ["", "The password to use to connect to LDAP."], :ldapbase => ["", "The search base for LDAP searches. It's impossible to provide a meaningful default here, although the LDAP libraries might have one already set. Generally, it should be the 'ou=Hosts' branch under your main directory."] ) setdefaults(:master, :storeconfigs => { :default => false, :desc => "Whether to store each client's configuration, including catalogs, facts, and related data. This also enables the import and export of resources in the Puppet language - a mechanism for exchanging resources between nodes. By default this uses ActiveRecord and an SQL database to store and query the data; this, in turn, will depend on Rails being available. You can adjust the backend using the storeconfigs_backend setting.", # Call our hook with the default value, so the node and facts indirections are always loaded.
:call_on_define => true, :hook => proc do |value| require 'puppet/node' require 'puppet/node/facts' if value Puppet.settings[:async_storeconfigs] or Puppet::Resource::Catalog.indirection.cache_class = :store_configs Puppet::Node::Facts.indirection.cache_class = :store_configs Puppet::Node.indirection.cache_class = :store_configs Puppet::Resource.indirection.terminus_class = :store_configs end end }, :storeconfigs_backend => { :default => "active_record", :desc => "Configure the backend terminus used for StoreConfigs. By default, this uses the ActiveRecord store, which directly talks to the database from within the Puppet Master process." } ) # This doesn't actually work right now. setdefaults( :parser, :lexical => [false, "Whether to use lexical scoping (vs. dynamic)."], :templatedir => ["$vardir/templates", "Where Puppet looks for template files. Can be a list of colon-separated directories." - ] + ], + + :allow_variables_with_dashes => { + :default => false, + :desc => <<-'EOT' +Permit the use of `-` in variable names. + +This exists because we introduced a change to the lexer that accidentally +modified our behaviour: instead of following our documented spec, we began +treating `-` as a valid character in variable names. + +That bug existed, unnoticed, for an entire 363 days before we squashed it, +in which time a whole lot of people built code that depended on the observed +rather than documented behaviour. + +This is definitely and decidedly deprecated: you should not change the value +of this option. However, it might hurt less to do that than to have to +rebuild every single manifest you own! + +If you set this to true, you will receive a rate-limited deprecation warning, +during compilation, for every variable with a `-` in its name, allowing you to +identify and clean up the offending locations. + +Please be aware that allowing variables with `-` might break other code, +such as Forge modules. Most people write code to +the documented standard, and their code might well fail to work once +this is changed away from the default! + +Using this to migrate is great. Using it to allow a different grammar for +Puppet is a bad, bad idea that you will eventually come to regret. + +Eventually this option will be removed entirely and you will have no choice +but to avoid variable names with a `-` in them. Don't make this your +long-term strategy. +EOT + } ) setdefaults( :puppetdoc, :document_all => [false, "Document all resources"] ) end diff --git a/lib/puppet/parser/lexer.rb b/lib/puppet/parser/lexer.rb index 39dbde1bb..6707e0ae2 100644 --- a/lib/puppet/parser/lexer.rb +++ b/lib/puppet/parser/lexer.rb @@ -1,580 +1,610 @@ # the scanner/lexer require 'strscan' require 'puppet' module Puppet class LexError < RuntimeError; end end module Puppet::Parser; end class Puppet::Parser::Lexer attr_reader :last, :file, :lexing_context, :token_queue attr_accessor :line, :indefine def lex_error msg raise Puppet::LexError.new(msg) end class Token attr_accessor :regex, :name, :string, :skip, :incr_line, :skip_text, :accumulate def initialize(regex, name) if regex.is_a?(String) @name, @string = name, regex @regex = Regexp.new(Regexp.escape(@string)) else @name, @regex = name, regex end end # MQR: Why not just alias?
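# Illustrative sketch of how the new :allow_variables_with_dashes setting drives the
# lexer changes further down in this file. This is a hypothetical IRB session, not part
# of the patch; it assumes puppet and this lexer are already loaded, and the variable
# names are made up purely for the example:
#
#   Puppet[:allow_variables_with_dashes] = true
#   lexer = Puppet::Parser::Lexer.new
#   lexer.string = '$foo-bar = "baz-${qux-quux}"'
#   p lexer.fullscan[0..-2]   # drop the trailing end-of-stream marker
#
# With the setting enabled, `$foo-bar` and `${qux-quux}` should each lex as a single
# :VARIABLE token ("foo-bar", "qux-quux") and emit a warning through
# Puppet.deprecation_warning. With the default of false, `$foo-bar` should instead lex
# as the variable `$foo` followed by a MINUS and the bare word `bar`, which is the
# documented pre-bug behaviour.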
%w{skip accumulate}.each do |method| define_method(method+"?") do self.send(method) end end def to_s if self.string @string else @name.to_s end end def acceptable?(context={}) # By default tokens are acceptable in any context true end end # Maintain a list of tokens. class TokenList attr_reader :regex_tokens, :string_tokens def [](name) @tokens[name] end # Create a new token. def add_token(name, regex, options = {}, &block) token = Token.new(regex, name) raise(ArgumentError, "Token #{name} already exists") if @tokens.include?(name) @tokens[token.name] = token if token.string @string_tokens << token @tokens_by_string[token.string] = token else @regex_tokens << token end options.each do |name, option| token.send(name.to_s + "=", option) end token.meta_def(:convert, &block) if block_given? token end def initialize @tokens = {} @regex_tokens = [] @string_tokens = [] @tokens_by_string = {} end # Look up a token by its value, rather than name. def lookup(string) @tokens_by_string[string] end # Define more tokens. def add_tokens(hash) hash.each do |regex, name| add_token(name, regex) end end # Sort our tokens by length, so we know once we match, we're done. # This helps us avoid the O(n^2) nature of token matching. def sort_tokens @string_tokens.sort! { |a, b| b.string.length <=> a.string.length } end + + # Yield each token name and value in turn. + def each + @tokens.each {|name, value| yield name, value } + end end TOKENS = TokenList.new TOKENS.add_tokens( '[' => :LBRACK, ']' => :RBRACK, '{' => :LBRACE, '}' => :RBRACE, '(' => :LPAREN, ')' => :RPAREN, '=' => :EQUALS, '+=' => :APPENDS, '==' => :ISEQUAL, '>=' => :GREATEREQUAL, '>' => :GREATERTHAN, '<' => :LESSTHAN, '<=' => :LESSEQUAL, '!=' => :NOTEQUAL, '!' => :NOT, ',' => :COMMA, '.' => :DOT, ':' => :COLON, '@' => :AT, '<<|' => :LLCOLLECT, '->' => :IN_EDGE, '<-' => :OUT_EDGE, '~>' => :IN_EDGE_SUB, '<~' => :OUT_EDGE_SUB, '|>>' => :RRCOLLECT, '<|' => :LCOLLECT, '|>' => :RCOLLECT, ';' => :SEMIC, '?' => :QMARK, '\\' => :BACKSLASH, '=>' => :FARROW, '+>' => :PARROW, '+' => :PLUS, '-' => :MINUS, '/' => :DIV, '*' => :TIMES, '<<' => :LSHIFT, '>>' => :RSHIFT, '=~' => :MATCH, '!~' => :NOMATCH, %r{((::){0,1}[A-Z][-\w]*)+} => :CLASSREF, "" => :STRING, "" => :DQPRE, "" => :DQMID, "" => :DQPOST, "" => :BOOLEAN ) # Numbers are treated separately from names, so that they may contain dots. TOKENS.add_token :NUMBER, %r{\b(?:0[xX][0-9A-Fa-f]+|0?\d+(?:\.\d+)?(?:[eE]-?\d+)?)\b} do |lexer, value| [TOKENS[:NAME], value] end #:stopdoc: # Issue #4161 def (TOKENS[:NUMBER]).acceptable?(context={}) ![:DQPRE,:DQMID].include? context[:after] end #:startdoc: TOKENS.add_token :NAME, %r{((::)?[a-z0-9][-\w]*)(::[a-z0-9][-\w]*)*} do |lexer, value| string_token = self # we're looking for keywords here if tmp = KEYWORDS.lookup(value) string_token = tmp if [:TRUE, :FALSE].include?(string_token.name) value = eval(value) string_token = TOKENS[:BOOLEAN] end end [string_token, value] end [:NAME,:CLASSNAME,:CLASSREF].each { |name_token| #:stopdoc: # Issue #4161 def (TOKENS[name_token]).acceptable?(context={}) ![:DQPRE,:DQMID].include?
context[:after] end #:startdoc: } TOKENS.add_token :COMMENT, %r{#.*}, :accumulate => true, :skip => true do |lexer,value| value.sub!(/# ?/,'') [self, value] end TOKENS.add_token :MLCOMMENT, %r{/\*(.*?)\*/}m, :accumulate => true, :skip => true do |lexer, value| lexer.line += value.count("\n") value.sub!(/^\/\* ?/,'') value.sub!(/ ?\*\/$/,'') [self,value] end TOKENS.add_token :REGEX, %r{/[^/\n]*/} do |lexer, value| # Make sure we haven't matched an escaped / while value[-2..-2] == '\\' other = lexer.scan_until(%r{/}) value += other end regex = value.sub(%r{\A/}, "").sub(%r{/\Z}, '').gsub("\\/", "/") [self, Regexp.new(regex)] end #:stopdoc: # Issue #4161 def (TOKENS[:REGEX]).acceptable?(context={}) [:NODE,:LBRACE,:RBRACE,:MATCH,:NOMATCH,:COMMA].include? context[:after] end #:startdoc: TOKENS.add_token :RETURN, "\n", :skip => true, :incr_line => true, :skip_text => true TOKENS.add_token :SQUOTE, "'" do |lexer, value| [TOKENS[:STRING], lexer.slurpstring(value,["'"],:ignore_invalid_escapes).first ] end DQ_initial_token_types = {'$' => :DQPRE,'"' => :STRING} DQ_continuation_token_types = {'$' => :DQMID,'"' => :DQPOST} TOKENS.add_token :DQUOTE, /"/ do |lexer, value| lexer.tokenize_interpolated_string(DQ_initial_token_types) end TOKENS.add_token :DQCONT, /\}/ do |lexer, value| lexer.tokenize_interpolated_string(DQ_continuation_token_types) end #:stopdoc: # Issue #4161 def (TOKENS[:DQCONT]).acceptable?(context={}) context[:string_interpolation_depth] > 0 end #:startdoc: + TOKENS.add_token :DOLLAR_VAR_WITH_DASH, %r{\$(::)?([-\w]+::)*[-\w]+} do |lexer, value| + msg = "Using `-` in variable names is deprecated at #{lexer.file || ''}:#{lexer.line}" + Puppet.deprecation_warning(msg) + + [TOKENS[:VARIABLE], value[1..-1]] + end + def (TOKENS[:DOLLAR_VAR_WITH_DASH]).acceptable?(context = {}) + !!Puppet[:allow_variables_with_dashes] + end + TOKENS.add_token :DOLLAR_VAR, %r{\$(::)?(\w+::)*\w+} do |lexer, value| [TOKENS[:VARIABLE],value[1..-1]] end + TOKENS.add_token :VARIABLE_WITH_DASH, %r{(::)?([-\w]+::)*[-\w]+} do |lexer, value| + msg = "Using `-` in variable names is deprecated at #{lexer.file}:#{lexer.line}" + Puppet.deprecation_warning(msg) + + [TOKENS[:VARIABLE], value] + end + #:stopdoc: # Issue #4161 + def (TOKENS[:VARIABLE_WITH_DASH]).acceptable?(context={}) + !!(Puppet[:allow_variables_with_dashes] and [:DQPRE,:DQMID].include?(context[:after])) + end + #:startdoc: + TOKENS.add_token :VARIABLE, %r{(::)?(\w+::)*\w+} #:stopdoc: # Issue #4161 def (TOKENS[:VARIABLE]).acceptable?(context={}) [:DQPRE,:DQMID].include? context[:after] end #:startdoc: TOKENS.sort_tokens @@pairs = { "{" => "}", "(" => ")", "[" => "]", "<|" => "|>", "<<|" => "|>>" } KEYWORDS = TokenList.new KEYWORDS.add_tokens( "case" => :CASE, "class" => :CLASS, "default" => :DEFAULT, "define" => :DEFINE, "import" => :IMPORT, "if" => :IF, "elsif" => :ELSIF, "else" => :ELSE, "inherits" => :INHERITS, "node" => :NODE, "and" => :AND, "or" => :OR, "undef" => :UNDEF, "false" => :FALSE, "true" => :TRUE, "in" => :IN ) def clear initvars end def expected return nil if @expected.empty? name = @expected[-1] TOKENS.lookup(name) or lex_error "Could not find expected token #{name}" end # scan the whole file # basically just used for testing def fullscan array = [] self.scan { |token, str| # Ignore any definition nesting problems @indefine = false array.push([token,str]) } array end def file=(file) @file = file @line = 1 contents = File.exists?(file) ? 
File.read(file) : "" @scanner = StringScanner.new(contents) end def shift_token @token_queue.shift end def find_string_token # We know our longest string token is three chars, so try each size in turn # until we either match or run out of chars. This way our worst-case is three # tries, where it is otherwise the number of string token we have. Also, # the lookups are optimized hash lookups, instead of regex scans. # s = @scanner.peek(3) token = TOKENS.lookup(s[0,3]) || TOKENS.lookup(s[0,2]) || TOKENS.lookup(s[0,1]) [ token, token && @scanner.scan(token.regex) ] end # Find the next token that matches a regex. We look for these first. def find_regex_token best_token = nil best_length = 0 # I tried optimizing based on the first char, but it had # a slightly negative affect and was a good bit more complicated. TOKENS.regex_tokens.each do |token| if length = @scanner.match?(token.regex) and token.acceptable?(lexing_context) # We've found a longer match if length > best_length best_length = length best_token = token end end end return best_token, @scanner.scan(best_token.regex) if best_token end # Find the next token, returning the string and the token. def find_token shift_token || find_regex_token || find_string_token end def indefine? if defined?(@indefine) @indefine else false end end def initialize initvars end def initvars @line = 1 @previous_token = nil @scanner = nil @file = nil # AAARRGGGG! okay, regexes in ruby are bloody annoying # no one else has "\n" =~ /\s/ @skip = %r{[ \t\r]+} @namestack = [] @token_queue = [] @indefine = false @expected = [] @commentstack = [ ['', @line] ] @lexing_context = { :after => nil, :start_of_line => true, :string_interpolation_depth => 0 } end # Make any necessary changes to the token and/or value. def munge_token(token, value) @line += 1 if token.incr_line skip if token.skip_text return if token.skip and not token.accumulate? token, value = token.convert(self, value) if token.respond_to?(:convert) return unless token if token.accumulate? comment = @commentstack.pop comment[0] << value + "\n" @commentstack.push(comment) end return if token.skip return token, { :value => value, :line => @line } end # Go up one in the namespace. def namepop @namestack.pop end # Collect the current namespace. def namespace @namestack.join("::") end # This value might have :: in it, but we don't care -- it'll be # handled normally when joining, and when popping we want to pop # this full value, however long the namespace is. def namestack(value) @namestack << value end def rest @scanner.rest end # this is the heart of the lexer def scan #Puppet.debug("entering scan") lex_error "Invalid or empty string" unless @scanner # Skip any initial whitespace. skip until token_queue.empty? and @scanner.eos? 
do yielded = false matched_token, value = find_token # error out if we didn't match anything at all lex_error "Could not match #{@scanner.rest[/^(\S+|\s+|.*)/]}" unless matched_token newline = matched_token.name == :RETURN # this matches a blank line; eat the previously accumulated comments getcomment if lexing_context[:start_of_line] and newline lexing_context[:start_of_line] = newline final_token, token_value = munge_token(matched_token, value) unless final_token skip next end lexing_context[:after] = final_token.name unless newline lexing_context[:string_interpolation_depth] += 1 if final_token.name == :DQPRE lexing_context[:string_interpolation_depth] -= 1 if final_token.name == :DQPOST value = token_value[:value] if match = @@pairs[value] and final_token.name != :DQUOTE and final_token.name != :SQUOTE @expected << match elsif exp = @expected[-1] and exp == value and final_token.name != :DQUOTE and final_token.name != :SQUOTE @expected.pop end if final_token.name == :LBRACE or final_token.name == :LPAREN commentpush end if final_token.name == :RPAREN commentpop end yield [final_token.name, token_value] if @previous_token namestack(value) if @previous_token.name == :CLASS and value != '{' if @previous_token.name == :DEFINE if indefine? msg = "Cannot nest definition #{value} inside #{@indefine}" self.indefine = false raise Puppet::ParseError, msg end @indefine = value end end @previous_token = final_token skip end @scanner = nil # This indicates that we're done parsing. yield [false,false] end # Skip any skipchars in our remaining string. def skip @scanner.skip(@skip) end # Provide some limited access to the scanner, for those # tokens that need it. def scan_until(regex) @scanner.scan_until(regex) end # we've encountered the start of a string... # slurp in the rest of the string and return it def slurpstring(terminators,escapes=%w{ \\ $ ' " r n t s }+["\n"],ignore_invalid_escapes=false) # we search for the next quote that isn't preceded by a # backslash; the caret is there to match empty strings str = @scanner.scan_until(/([^\\]|^|[^\\])([\\]{2})*[#{terminators}]/) or lex_error "Unclosed quote after '#{last}' in '#{rest}'" @line += str.count("\n") # literal carriage returns add to the line count. str.gsub!(/\\(.)/m) { ch = $1 if escapes.include? ch case ch when 'r'; "\r" when 'n'; "\n" when 't'; "\t" when 's'; " " when "\n"; '' else ch end else Puppet.warning "Unrecognised escape sequence '\\#{ch}'#{file && " in file #{file}"}#{line && " at line #{line}"}" unless ignore_invalid_escapes "\\#{ch}" end } [ str[0..-2],str[-1,1] ] end def tokenize_interpolated_string(token_type,preamble='') value,terminator = slurpstring('"$') token_queue << [TOKENS[token_type[terminator]],preamble+value] if terminator != '$' or @scanner.scan(/\{/) token_queue.shift + elsif Puppet[:allow_variables_with_dashes] and var_name = @scanner.scan(TOKENS[:VARIABLE_WITH_DASH].regex) + token_queue << [TOKENS[:VARIABLE],var_name] + tokenize_interpolated_string(DQ_continuation_token_types) elsif var_name = @scanner.scan(TOKENS[:VARIABLE].regex) token_queue << [TOKENS[:VARIABLE],var_name] tokenize_interpolated_string(DQ_continuation_token_types) else tokenize_interpolated_string(token_type,token_queue.pop.last + terminator) end end # just parse a string, not a whole file def string=(string) @scanner = StringScanner.new(string) end # returns the content of the currently accumulated content cache def commentpop @commentstack.pop[0] end def getcomment(line = nil) comment = @commentstack.last if line.nil? 
or comment[1] <= line @commentstack.pop @commentstack.push(['', @line]) return comment[0] end '' end def commentpush @commentstack.push(['', @line]) end end diff --git a/spec/unit/parser/lexer_spec.rb b/spec/unit/parser/lexer_spec.rb index 670695ff4..9bac1630b 100755 --- a/spec/unit/parser/lexer_spec.rb +++ b/spec/unit/parser/lexer_spec.rb @@ -1,755 +1,884 @@ #!/usr/bin/env rspec require 'spec_helper' require 'puppet/parser/lexer' # This is a special matcher to match easily lexer output RSpec::Matchers.define :be_like do |*expected| match do |actual| expected.zip(actual).all? { |e,a| !e or a[0] == e or (e.is_a? Array and a[0] == e[0] and (a[1] == e[1] or (a[1].is_a?(Hash) and a[1][:value] == e[1]))) } end end __ = nil +def tokens_scanned_from(s) + lexer = Puppet::Parser::Lexer.new + lexer.string = s + lexer.fullscan[0..-2] +end + + describe Puppet::Parser::Lexer do describe "when reading strings" do before { @lexer = Puppet::Parser::Lexer.new } it "should increment the line count for every carriage return in the string" do @lexer.line = 10 @lexer.string = "this\nis\natest'" @lexer.slurpstring("'") @lexer.line.should == 12 end it "should not increment the line count for escapes in the string" do @lexer.line = 10 @lexer.string = "this\\nis\\natest'" @lexer.slurpstring("'") @lexer.line.should == 10 end it "should not think the terminator is escaped, when preceeded by an even number of backslashes" do @lexer.line = 10 @lexer.string = "here\nis\nthe\nstring\\\\'with\nextra\njunk" @lexer.slurpstring("'") @lexer.line.should == 13 end { 'r' => "\r", 'n' => "\n", 't' => "\t", 's' => " " }.each do |esc, expected_result| it "should recognize \\#{esc} sequence" do @lexer.string = "\\#{esc}'" @lexer.slurpstring("'")[0].should == expected_result end end end end describe Puppet::Parser::Lexer::Token do before do @token = Puppet::Parser::Lexer::Token.new(%r{something}, :NAME) end [:regex, :name, :string, :skip, :incr_line, :skip_text, :accumulate].each do |param| it "should have a #{param.to_s} reader" do @token.should be_respond_to(param) end it "should have a #{param.to_s} writer" do @token.should be_respond_to(param.to_s + "=") end end end describe Puppet::Parser::Lexer::Token, "when initializing" do it "should create a regex if the first argument is a string" do Puppet::Parser::Lexer::Token.new("something", :NAME).regex.should == %r{something} end it "should set the string if the first argument is one" do Puppet::Parser::Lexer::Token.new("something", :NAME).string.should == "something" end it "should set the regex if the first argument is one" do Puppet::Parser::Lexer::Token.new(%r{something}, :NAME).regex.should == %r{something} end end describe Puppet::Parser::Lexer::TokenList do before do @list = Puppet::Parser::Lexer::TokenList.new end it "should have a method for retrieving tokens by the name" do token = @list.add_token :name, "whatever" @list[:name].should equal(token) end it "should have a method for retrieving string tokens by the string" do token = @list.add_token :name, "whatever" @list.lookup("whatever").should equal(token) end it "should add tokens to the list when directed" do token = @list.add_token :name, "whatever" @list[:name].should equal(token) end it "should have a method for adding multiple tokens at once" do @list.add_tokens "whatever" => :name, "foo" => :bar @list[:name].should_not be_nil @list[:bar].should_not be_nil end it "should fail to add tokens sharing a name with an existing token" do @list.add_token :name, "whatever" expect { @list.add_token :name, "whatever" }.to 
raise_error(ArgumentError) end it "should set provided options on tokens being added" do token = @list.add_token :name, "whatever", :skip_text => true token.skip_text.should == true end it "should define any provided blocks as a :convert method" do token = @list.add_token(:name, "whatever") do "foo" end token.convert.should == "foo" end it "should store all string tokens in the :string_tokens list" do one = @list.add_token(:name, "1") @list.string_tokens.should be_include(one) end it "should store all regex tokens in the :regex_tokens list" do one = @list.add_token(:name, %r{one}) @list.regex_tokens.should be_include(one) end it "should not store string tokens in the :regex_tokens list" do one = @list.add_token(:name, "1") @list.regex_tokens.should_not be_include(one) end it "should not store regex tokens in the :string_tokens list" do one = @list.add_token(:name, %r{one}) @list.string_tokens.should_not be_include(one) end it "should sort the string tokens inversely by length when asked" do one = @list.add_token(:name, "1") two = @list.add_token(:other, "12") @list.sort_tokens @list.string_tokens.should == [two, one] end end describe Puppet::Parser::Lexer::TOKENS do before do @lexer = Puppet::Parser::Lexer.new end { :LBRACK => '[', :RBRACK => ']', :LBRACE => '{', :RBRACE => '}', :LPAREN => '(', :RPAREN => ')', :EQUALS => '=', :ISEQUAL => '==', :GREATEREQUAL => '>=', :GREATERTHAN => '>', :LESSTHAN => '<', :LESSEQUAL => '<=', :NOTEQUAL => '!=', :NOT => '!', :COMMA => ',', :DOT => '.', :COLON => ':', :AT => '@', :LLCOLLECT => '<<|', :RRCOLLECT => '|>>', :LCOLLECT => '<|', :RCOLLECT => '|>', :SEMIC => ';', :QMARK => '?', :BACKSLASH => '\\', :FARROW => '=>', :PARROW => '+>', :APPENDS => '+=', :PLUS => '+', :MINUS => '-', :DIV => '/', :TIMES => '*', :LSHIFT => '<<', :RSHIFT => '>>', :MATCH => '=~', :NOMATCH => '!~', :IN_EDGE => '->', :OUT_EDGE => '<-', :IN_EDGE_SUB => '~>', :OUT_EDGE_SUB => '<~', }.each do |name, string| it "should have a token named #{name.to_s}" do Puppet::Parser::Lexer::TOKENS[name].should_not be_nil end it "should match '#{string}' for the token #{name.to_s}" do Puppet::Parser::Lexer::TOKENS[name].string.should == string end end { "case" => :CASE, "class" => :CLASS, "default" => :DEFAULT, "define" => :DEFINE, "import" => :IMPORT, "if" => :IF, "elsif" => :ELSIF, "else" => :ELSE, "inherits" => :INHERITS, "node" => :NODE, "and" => :AND, "or" => :OR, "undef" => :UNDEF, "false" => :FALSE, "true" => :TRUE, "in" => :IN, }.each do |string, name| it "should have a keyword named #{name.to_s}" do Puppet::Parser::Lexer::KEYWORDS[name].should_not be_nil end it "should have the keyword for #{name.to_s} set to #{string}" do Puppet::Parser::Lexer::KEYWORDS[name].string.should == string end end # These tokens' strings don't matter, just that the tokens exist. 
[:STRING, :DQPRE, :DQMID, :DQPOST, :BOOLEAN, :NAME, :NUMBER, :COMMENT, :MLCOMMENT, :RETURN, :SQUOTE, :DQUOTE, :VARIABLE].each do |name| it "should have a token named #{name.to_s}" do Puppet::Parser::Lexer::TOKENS[name].should_not be_nil end end end describe Puppet::Parser::Lexer::TOKENS[:CLASSREF] do before { @token = Puppet::Parser::Lexer::TOKENS[:CLASSREF] } it "should match against single upper-case alpha-numeric terms" do @token.regex.should =~ "One" end it "should match against upper-case alpha-numeric terms separated by double colons" do @token.regex.should =~ "One::Two" end it "should match against many upper-case alpha-numeric terms separated by double colons" do @token.regex.should =~ "One::Two::Three::Four::Five" end it "should match against upper-case alpha-numeric terms prefixed by double colons" do @token.regex.should =~ "::One" end end describe Puppet::Parser::Lexer::TOKENS[:NAME] do before { @token = Puppet::Parser::Lexer::TOKENS[:NAME] } it "should match against lower-case alpha-numeric terms" do @token.regex.should =~ "one-two" end it "should return itself and the value if the matched term is not a keyword" do Puppet::Parser::Lexer::KEYWORDS.expects(:lookup).returns(nil) @token.convert(stub("lexer"), "myval").should == [Puppet::Parser::Lexer::TOKENS[:NAME], "myval"] end it "should return the keyword token and the value if the matched term is a keyword" do keyword = stub 'keyword', :name => :testing Puppet::Parser::Lexer::KEYWORDS.expects(:lookup).returns(keyword) @token.convert(stub("lexer"), "myval").should == [keyword, "myval"] end it "should return the BOOLEAN token and 'true' if the matched term is the string 'true'" do keyword = stub 'keyword', :name => :TRUE Puppet::Parser::Lexer::KEYWORDS.expects(:lookup).returns(keyword) @token.convert(stub('lexer'), "true").should == [Puppet::Parser::Lexer::TOKENS[:BOOLEAN], true] end it "should return the BOOLEAN token and 'false' if the matched term is the string 'false'" do keyword = stub 'keyword', :name => :FALSE Puppet::Parser::Lexer::KEYWORDS.expects(:lookup).returns(keyword) @token.convert(stub('lexer'), "false").should == [Puppet::Parser::Lexer::TOKENS[:BOOLEAN], false] end it "should match against lower-case alpha-numeric terms separated by double colons" do @token.regex.should =~ "one::two" end it "should match against many lower-case alpha-numeric terms separated by double colons" do @token.regex.should =~ "one::two::three::four::five" end it "should match against lower-case alpha-numeric terms prefixed by double colons" do @token.regex.should =~ "::one" end it "should match against nested terms starting with numbers" do @token.regex.should =~ "::1one::2two::3three" end end describe Puppet::Parser::Lexer::TOKENS[:NUMBER] do before do @token = Puppet::Parser::Lexer::TOKENS[:NUMBER] @regex = @token.regex end it "should match against numeric terms" do @regex.should =~ "2982383139" end it "should match against float terms" do @regex.should =~ "29823.235" end it "should match against hexadecimal terms" do @regex.should =~ "0xBEEF0023" end it "should match against float with exponent terms" do @regex.should =~ "10e23" end it "should match against float terms with negative exponents" do @regex.should =~ "10e-23" end it "should match against float terms with fractional parts and exponent" do @regex.should =~ "1.234e23" end it "should return the NAME token and the value" do @token.convert(stub("lexer"), "myval").should == [Puppet::Parser::Lexer::TOKENS[:NAME], "myval"] end end describe Puppet::Parser::Lexer::TOKENS[:COMMENT] do 
before { @token = Puppet::Parser::Lexer::TOKENS[:COMMENT] } it "should match against lines starting with '#'" do @token.regex.should =~ "# this is a comment" end it "should be marked to get skipped" do @token.skip?.should be_true end it "should be marked to accumulate" do @token.accumulate?.should be_true end it "'s block should return the comment without the #" do @token.convert(@lexer,"# this is a comment")[1].should == "this is a comment" end end describe Puppet::Parser::Lexer::TOKENS[:MLCOMMENT] do before do @token = Puppet::Parser::Lexer::TOKENS[:MLCOMMENT] @lexer = stub 'lexer', :line => 0 end it "should match against lines enclosed with '/*' and '*/'" do @token.regex.should =~ "/* this is a comment */" end it "should match multiple lines enclosed with '/*' and '*/'" do @token.regex.should =~ """/* this is a comment */""" end it "should increase the lexer current line number by the amount of lines spanned by the comment" do @lexer.expects(:line=).with(2) @token.convert(@lexer, "1\n2\n3") end it "should not greedily match comments" do match = @token.regex.match("/* first */ word /* second */") match[1].should == " first " end it "should be marked to accumulate" do @token.accumulate?.should be_true end it "'s block should return the comment without the comment marks" do @lexer.stubs(:line=).with(0) @token.convert(@lexer,"/* this is a comment */")[1].should == "this is a comment" end end describe Puppet::Parser::Lexer::TOKENS[:RETURN] do before { @token = Puppet::Parser::Lexer::TOKENS[:RETURN] } it "should match against carriage returns" do @token.regex.should =~ "\n" end it "should be marked to initiate text skipping" do @token.skip_text.should be_true end it "should be marked to increment the line" do @token.incr_line.should be_true end end -shared_examples_for "variable names in the lexer" do |prefix| + +shared_examples_for "handling `-` in standard variable names" do |prefix| # Watch out - a regex might match a *prefix* on these, not just the whole # word, so make sure you don't have false positive or negative results based # on that. 
legal = %w{f foo f::b foo::b f::bar foo::bar 3 foo3 3foo} illegal = %w{f- f-o -f f::-o f::o- f::o-o} ["", "::"].each do |global_scope| legal.each do |name| var = prefix + global_scope + name it "should accept #{var.inspect} as a valid variable name" do (subject.regex.match(var) || [])[0].should == var end end illegal.each do |name| var = prefix + global_scope + name - it "should NOT accept #{var.inspect} as a valid variable name" do + it "should NOT accept #{var.inspect} as a valid variable name when `allow_variables_with_dashes` is disabled" do + Puppet[:allow_variables_with_dashes] = false + (subject.regex.match(var) || [])[0].should_not == var + end + + it "should NOT accept #{var.inspect} as a valid variable name when `allow_variables_with_dashes` is enabled" do + Puppet[:allow_variables_with_dashes] = true (subject.regex.match(var) || [])[0].should_not == var end end end end describe Puppet::Parser::Lexer::TOKENS[:DOLLAR_VAR] do its(:skip_text) { should be_false } its(:incr_line) { should be_false } - it_should_behave_like "variable names in the lexer", '$' + it_should_behave_like "handling `-` in standard variable names", '$' end describe Puppet::Parser::Lexer::TOKENS[:VARIABLE] do its(:skip_text) { should be_false } its(:incr_line) { should be_false } - it_should_behave_like "variable names in the lexer", '' + it_should_behave_like "handling `-` in standard variable names", '' end -def tokens_scanned_from(s) - lexer = Puppet::Parser::Lexer.new - lexer.string = s - lexer.fullscan[0..-2] +describe "the deprecation / compatibility handling of variables with dashes" do + NamesWithDashes = %w{f- f-o -f f::-o f::o- f::o-o} + + { Puppet::Parser::Lexer::TOKENS[:DOLLAR_VAR_WITH_DASH] => '$', + Puppet::Parser::Lexer::TOKENS[:VARIABLE_WITH_DASH] => '' + }.each do |token, prefix| + describe token do + its(:skip_text) { should be_false } + its(:incr_line) { should be_false } + + context "when compatibility is disabled" do + before :each do Puppet[:allow_variables_with_dashes] = false end + Puppet::Parser::Lexer::TOKENS.each do |name, value| + it "should be unacceptable after #{name}" do + token.acceptable?(:after => name).should be_false + end + end + + # Yes, this should still *match*, just not be acceptable.
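+ # In other words: the WITH_DASH regexes always match dashed names, but the
+ # `allow_variables_with_dashes` setting controls whether the token is
+ # *acceptable* in the current lexing context (the `acceptable?` expectations
+ # above), so with the setting disabled the lexer should never emit it.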
+ NamesWithDashes.each do |name| + ["", "::"].each do |global_scope| + var = prefix + global_scope + name + it "should match #{var.inspect}" do + (subject.regex.match(var) || [])[0].should == var + end + end + end + end + + context "when compatibility is enabled" do + before :each do Puppet[:allow_variables_with_dashes] = true end + it "should be acceptable after DQPRE" do + token.acceptable?(:after => :DQPRE).should be_true + end + + NamesWithDashes.each do |name| + ["", "::"].each do |global_scope| + var = prefix + global_scope + name + it "should match #{var.inspect}" do + (subject.regex.match(var) || [])[0].should == var + end + end + end + end + end + end + + context "deprecation warnings" do + before :each do Puppet[:allow_variables_with_dashes] = true end + + it "should match a top level variable" do + Puppet.expects(:deprecation_warning).once + src = %q'$foo-bar = 12' + tokens_scanned_from(src).should == [ + [:VARIABLE, { :value => 'foo-bar', :line => 1 }], + [:EQUALS, { :value => '=', :line => 1 }], + [:NAME, { :value => '12', :line => 1 }], + ] + end + + it "should match a variable in a class with a `-`" do + Puppet.expects(:deprecation_warning).once + src = %q'class foo-bar { $baz-quux = 12 }' + tokens_scanned_from(src).should == [ + [:CLASS, { :value => "class", :line => 1 }], + [:NAME, { :value => "foo-bar", :line => 1 }], + [:LBRACE, { :value => "{", :line => 1 }], + [:VARIABLE, { :value => "baz-quux", :line => 1 }], + [:EQUALS, { :value => "=", :line => 1 }], + [:NAME, { :value => "12", :line => 1 }], + [:RBRACE, { :value => "}", :line => 1 }] + ] + end + + it "should match a scoped reference with a `-`" do + Puppet.expects(:deprecation_warning).times(3) + # working around horrible Ruby syntax in editors. + src = [ + 'class foo-bar {', + ' $baz-quux = 12', + '}', + 'include foo-bar', + 'notice($::foo-bar::baz-quux)', + 'notice("$::foo-bar::baz-quux")', + 'notice("${::foo-bar::baz-quux}")' + ].join("\n") + + # Note: this is a hand-checked parse tree. Be careful about changing it + # without understanding why the lexer works how it does. Especially, be + # wary of changing anything that touches a :VARIABLE output. 
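+ # Note also that the bare "$::foo-bar::baz-quux" interpolation and the
+ # braced "${::foo-bar::baz-quux}" interpolation below are expected to lex
+ # to the same DQPRE / VARIABLE / DQPOST sequence.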
+ tokens_scanned_from(src).should == [ + [:CLASS, { :value => "class", :line => 1 }], + [:NAME, { :value => "foo-bar", :line => 1 }], + [:LBRACE, { :value => "{", :line => 1 }], + [:VARIABLE, { :value => "baz-quux", :line => 2 }], + [:EQUALS, { :value => "=", :line => 2 }], + [:NAME, { :value => "12", :line => 2 }], + [:RBRACE, { :value => "}", :line => 3 }], + [:NAME, { :value => "include", :line => 4 }], + [:NAME, { :value => "foo-bar", :line => 4 }], + [:NAME, { :value => "notice", :line => 5 }], + [:LPAREN, { :value => "(", :line => 5 }], + [:VARIABLE, { :value => "::foo-bar::baz-quux", :line => 5 }], + [:RPAREN, { :value => ")", :line => 5 }], + [:NAME, { :value => "notice", :line => 6 }], + [:LPAREN, { :value => "(", :line => 6 }], + [:DQPRE, { :value => "", :line => 6 }], + [:VARIABLE, { :value => "::foo-bar::baz-quux", :line => 6 }], + [:DQPOST, { :value => "", :line => 6 }], + [:RPAREN, { :value => ")", :line => 6 }], + [:NAME, { :value => "notice", :line => 7 }], + [:LPAREN, { :value => "(", :line => 7 }], + [:DQPRE, { :value => "", :line => 7 }], + [:VARIABLE, { :value => "::foo-bar::baz-quux", :line => 7 }], + [:DQPOST, { :value => "", :line => 7 }], + [:RPAREN, { :value => ")", :line => 7 }] + ] + end + end end describe Puppet::Parser::Lexer,"when lexing strings" do { %q{'single quoted string')} => [[:STRING,'single quoted string']], %q{"double quoted string"} => [[:STRING,'double quoted string']], %q{'single quoted string with an escaped "\\'"'} => [[:STRING,'single quoted string with an escaped "\'"']], %q{'single quoted string with an escaped "\$"'} => [[:STRING,'single quoted string with an escaped "\$"']], %q{'single quoted string with an escaped "\."'} => [[:STRING,'single quoted string with an escaped "\."']], %q{'single quoted string with an escaped "\r\n"'} => [[:STRING,'single quoted string with an escaped "\r\n"']], %q{'single quoted string with an escaped "\n"'} => [[:STRING,'single quoted string with an escaped "\n"']], %q{'single quoted string with an escaped "\\\\"'} => [[:STRING,'single quoted string with an escaped "\\\\"']], %q{"string with an escaped '\\"'"} => [[:STRING,"string with an escaped '\"'"]], %q{"string with an escaped '\\$'"} => [[:STRING,"string with an escaped '$'"]], %Q{"string with a line ending with a backslash: \\\nfoo"} => [[:STRING,"string with a line ending with a backslash: foo"]], %q{"string with $v (but no braces)"} => [[:DQPRE,"string with "],[:VARIABLE,'v'],[:DQPOST,' (but no braces)']], %q["string with ${v} in braces"] => [[:DQPRE,"string with "],[:VARIABLE,'v'],[:DQPOST,' in braces']], %q["string with ${qualified::var} in braces"] => [[:DQPRE,"string with "],[:VARIABLE,'qualified::var'],[:DQPOST,' in braces']], %q{"string with $v and $v (but no braces)"} => [[:DQPRE,"string with "],[:VARIABLE,"v"],[:DQMID," and "],[:VARIABLE,"v"],[:DQPOST," (but no braces)"]], %q["string with ${v} and ${v} in braces"] => [[:DQPRE,"string with "],[:VARIABLE,"v"],[:DQMID," and "],[:VARIABLE,"v"],[:DQPOST," in braces"]], %q["string with ${'a nested single quoted string'} inside it."] => [[:DQPRE,"string with "],[:STRING,'a nested single quoted string'],[:DQPOST,' inside it.']], %q["string with ${['an array ',$v2]} in it."] => [[:DQPRE,"string with "],:LBRACK,[:STRING,"an array "],:COMMA,[:VARIABLE,"v2"],:RBRACK,[:DQPOST," in it."]], %q{a simple "scanner" test} => [[:NAME,"a"],[:NAME,"simple"], [:STRING,"scanner"],[:NAME,"test"]], %q{a simple 'single quote scanner' test} => [[:NAME,"a"],[:NAME,"simple"], [:STRING,"single quote 
scanner"],[:NAME,"test"]], %q{a harder 'a $b \c"'} => [[:NAME,"a"],[:NAME,"harder"], [:STRING,'a $b \c"']], %q{a harder "scanner test"} => [[:NAME,"a"],[:NAME,"harder"], [:STRING,"scanner test"]], %q{a hardest "scanner \"test\""} => [[:NAME,"a"],[:NAME,"hardest"],[:STRING,'scanner "test"']], %Q{a hardestest "scanner \\"test\\"\n"} => [[:NAME,"a"],[:NAME,"hardestest"],[:STRING,%Q{scanner "test"\n}]], %q{function("call")} => [[:NAME,"function"],[:LPAREN,"("],[:STRING,'call'],[:RPAREN,")"]], %q["string with ${(3+5)/4} nested math."] => [[:DQPRE,"string with "],:LPAREN,[:NAME,"3"],:PLUS,[:NAME,"5"],:RPAREN,:DIV,[:NAME,"4"],[:DQPOST," nested math."]], %q["$$$$"] => [[:STRING,"$$$$"]], %q["$variable"] => [[:DQPRE,""],[:VARIABLE,"variable"],[:DQPOST,""]], %q["$var$other"] => [[:DQPRE,""],[:VARIABLE,"var"],[:DQMID,""],[:VARIABLE,"other"],[:DQPOST,""]], %q["foo$bar$"] => [[:DQPRE,"foo"],[:VARIABLE,"bar"],[:DQPOST,"$"]], %q["foo$$bar"] => [[:DQPRE,"foo$"],[:VARIABLE,"bar"],[:DQPOST,""]], %q[""] => [[:STRING,""]], %q["123 456 789 0"] => [[:STRING,"123 456 789 0"]], %q["${123} 456 $0"] => [[:DQPRE,""],[:VARIABLE,"123"],[:DQMID," 456 "],[:VARIABLE,"0"],[:DQPOST,""]], %q["$foo::::bar"] => [[:DQPRE,""],[:VARIABLE,"foo"],[:DQPOST,"::::bar"]] }.each { |src,expected_result| it "should handle #{src} correctly" do tokens_scanned_from(src).should be_like(*expected_result) end } end describe Puppet::Parser::Lexer::TOKENS[:DOLLAR_VAR] do before { @token = Puppet::Parser::Lexer::TOKENS[:DOLLAR_VAR] } it "should match against alpha words prefixed with '$'" do @token.regex.should =~ '$this_var' end it "should return the VARIABLE token and the variable name stripped of the '$'" do @token.convert(stub("lexer"), "$myval").should == [Puppet::Parser::Lexer::TOKENS[:VARIABLE], "myval"] end end describe Puppet::Parser::Lexer::TOKENS[:REGEX] do before { @token = Puppet::Parser::Lexer::TOKENS[:REGEX] } it "should match against any expression enclosed in //" do @token.regex.should =~ '/this is a regex/' end it 'should not match if there is \n in the regex' do @token.regex.should_not =~ "/this is \n a regex/" end describe "when scanning" do it "should not consider escaped slashes to be the end of a regex" do tokens_scanned_from("$x =~ /this \\/ foo/").should be_like(__,__,[:REGEX,%r{this / foo}]) end it "should not lex chained division as a regex" do tokens_scanned_from("$x = $a/$b/$c").collect { |name, data| name }.should_not be_include( :REGEX ) end it "should accept a regular expression after NODE" do tokens_scanned_from("node /www.*\.mysite\.org/").should be_like(__,[:REGEX,Regexp.new("www.*\.mysite\.org")]) end it "should accept regular expressions in a CASE" do s = %q{case $variable { "something": {$othervar = 4096 / 2} /regex/: {notice("this notably sucks")} } } tokens_scanned_from(s).should be_like( :CASE,:VARIABLE,:LBRACE,:STRING,:COLON,:LBRACE,:VARIABLE,:EQUALS,:NAME,:DIV,:NAME,:RBRACE,[:REGEX,/regex/],:COLON,:LBRACE,:NAME,:LPAREN,:STRING,:RPAREN,:RBRACE,:RBRACE ) end end it "should return the REGEX token and a Regexp" do @token.convert(stub("lexer"), "/myregex/").should == [Puppet::Parser::Lexer::TOKENS[:REGEX], Regexp.new(/myregex/)] end end describe Puppet::Parser::Lexer, "when lexing comments" do before { @lexer = Puppet::Parser::Lexer.new } it "should accumulate token in munge_token" do token = stub 'token', :skip => true, :accumulate? 
=> true, :incr_line => nil, :skip_text => false token.stubs(:convert).with(@lexer, "# this is a comment").returns([token, " this is a comment"]) @lexer.munge_token(token, "# this is a comment") @lexer.munge_token(token, "# this is a comment") @lexer.getcomment.should == " this is a comment\n this is a comment\n" end it "should add a new comment stack level on LBRACE" do @lexer.string = "{" @lexer.expects(:commentpush) @lexer.fullscan end it "should add a new comment stack level on LPAREN" do @lexer.string = "(" @lexer.expects(:commentpush) @lexer.fullscan end it "should pop the current comment on RPAREN" do @lexer.string = ")" @lexer.expects(:commentpop) @lexer.fullscan end it "should return the current comments on getcomment" do @lexer.string = "# comment" @lexer.fullscan @lexer.getcomment.should == "comment\n" end it "should discard the previous comments on blank line" do @lexer.string = "# 1\n\n# 2" @lexer.fullscan @lexer.getcomment.should == "2\n" end it "should skip whitespace before lexing the next token after a non-token" do tokens_scanned_from("/* 1\n\n */ \ntest").should be_like([:NAME, "test"]) end it "should not return comments seen after the current line" do @lexer.string = "# 1\n\n# 2" @lexer.fullscan @lexer.getcomment(1).should == "" end it "should return a comment seen before the current line" do @lexer.string = "# 1\n# 2" @lexer.fullscan @lexer.getcomment(2).should == "1\n2\n" end end # FIXME: We need to rewrite all of these tests, but I just don't want to take the time right now. describe "Puppet::Parser::Lexer in the old tests" do before { @lexer = Puppet::Parser::Lexer.new } it "should do simple lexing" do { %q{\\} => [[:BACKSLASH,"\\"]], %q{simplest scanner test} => [[:NAME,"simplest"],[:NAME,"scanner"],[:NAME,"test"]], %Q{returned scanner test\n} => [[:NAME,"returned"],[:NAME,"scanner"],[:NAME,"test"]] }.each { |source,expected| tokens_scanned_from(source).should be_like(*expected) } end it "should fail usefully" do expect { tokens_scanned_from('^') }.to raise_error(RuntimeError) end it "should fail if the string is not set" do expect { @lexer.fullscan }.to raise_error(Puppet::LexError) end it "should correctly identify keywords" do tokens_scanned_from("case").should be_like([:CASE, "case"]) end it "should correctly parse class references" do %w{Many Different Words A Word}.each { |t| tokens_scanned_from(t).should be_like([:CLASSREF,t])} end # #774 it "should correctly parse namespaced class references" do %w{Foo ::Foo Foo::Bar ::Foo::Bar}.each { |t| tokens_scanned_from(t).should be_like([:CLASSREF, t]) } end it "should correctly parse names" do %w{this is a bunch of names}.each { |t| tokens_scanned_from(t).should be_like([:NAME,t]) } end it "should correctly parse names with numerals" do %w{1name name1 11names names11}.each { |t| tokens_scanned_from(t).should be_like([:NAME,t]) } end it "should correctly parse empty strings" do expect { tokens_scanned_from('$var = ""') }.to_not raise_error end it "should correctly parse virtual resources" do tokens_scanned_from("@type {").should be_like([:AT, "@"], [:NAME, "type"], [:LBRACE, "{"]) end it "should correctly deal with namespaces" do @lexer.string = %{class myclass} @lexer.fullscan @lexer.namespace.should == "myclass" @lexer.namepop @lexer.namespace.should == "" @lexer.string = "class base { class sub { class more" @lexer.fullscan @lexer.namespace.should == "base::sub::more" @lexer.namepop @lexer.namespace.should == "base::sub" end it "should not put class instantiation on the namespace" do @lexer.string = "class
base { class sub { class { mode" @lexer.fullscan @lexer.namespace.should == "base::sub" end it "should correctly handle fully qualified names" do @lexer.string = "class base { class sub::more {" @lexer.fullscan @lexer.namespace.should == "base::sub::more" @lexer.namepop @lexer.namespace.should == "base" end it "should correctly lex variables" do ["$variable", "$::variable", "$qualified::variable", "$further::qualified::variable"].each do |string| tokens_scanned_from(string).should be_like([:VARIABLE,string.sub(/^\$/,'')]) end end it "should end variables at `-`" do tokens_scanned_from('$hyphenated-variable'). should be_like [:VARIABLE, "hyphenated"], [:MINUS, '-'], [:NAME, 'variable'] end it "should not include whitespace in a variable" do tokens_scanned_from("$foo bar").should_not be_like([:VARIABLE, "foo bar"]) end it "should not include excess colons in a variable" do tokens_scanned_from("$foo::::bar").should_not be_like([:VARIABLE, "foo::::bar"]) end end describe "Puppet::Parser::Lexer in the old tests when lexing example files" do my_fixtures('*.pp') do |file| it "should correctly lex #{file}" do lexer = Puppet::Parser::Lexer.new lexer.file = file expect { lexer.fullscan }.to_not raise_error end end end describe "when trying to lex a non-existent file" do include PuppetSpec::Files it "should return an empty list of tokens" do lexer = Puppet::Parser::Lexer.new lexer.file = nofile = tmpfile('lexer') File.exists?(nofile).should == false lexer.fullscan.should == [[false,false]] end end
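For reference, the behaviour pinned down by these specs can be reproduced interactively. The sketch below is illustrative only and is not part of the patch: it assumes a loaded Puppet environment in which the `allow_variables_with_dashes` setting is defined and can be toggled at runtime (as the specs above do), and it uses a hypothetical `tokens_for` helper modelled on the spec's `tokens_scanned_from`.

require 'puppet'
require 'puppet/parser/lexer'

# Hypothetical helper, modelled on tokens_scanned_from above: lex a string
# and drop the trailing [false, false] end-of-input marker.
def tokens_for(source)
  lexer = Puppet::Parser::Lexer.new
  lexer.string = source
  lexer.fullscan[0..-2].map { |name, data| [name, data.is_a?(Hash) ? data[:value] : data] }
end

Puppet[:allow_variables_with_dashes] = false
tokens_for('$hyphenated-variable')
# => [[:VARIABLE, "hyphenated"], [:MINUS, "-"], [:NAME, "variable"]]

Puppet[:allow_variables_with_dashes] = true
tokens_for('$foo-bar = 12')   # also expected to issue a deprecation warning
# => [[:VARIABLE, "foo-bar"], [:EQUALS, "="], [:NAME, "12"]]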