summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--ChangeLog25
-rw-r--r--lib/csv.rb706
-rw-r--r--test/csv/tc_csv_parsing.rb32
-rw-r--r--test/csv/tc_csv_writing.rb3
-rw-r--r--test/csv/tc_data_converters.rb3
-rw-r--r--test/csv/tc_encodings.rb255
-rw-r--r--test/csv/tc_features.rb100
-rw-r--r--test/csv/tc_headers.rb29
-rw-r--r--test/csv/tc_interface.rb79
-rw-r--r--test/csv/tc_row.rb23
-rw-r--r--test/csv/tc_serialization.rb3
-rw-r--r--test/csv/tc_table.rb16
-rw-r--r--test/csv/ts_all.rb4
13 files changed, 1106 insertions, 172 deletions
diff --git a/ChangeLog b/ChangeLog
index adb830964..512784bd4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,28 @@
+Sun Sep 21 09:37:57 2008 James Edward Gray II <jeg2@ruby-lang.org>
+
+ * lib/csv/csv.rb: Reworked CSV's parser and generator to be m17n. Data
+ is now parsed in the Encoding it is in without need for translation.
+ * lib/csv/csv.rb: Improved inspect() messages for better IRb support.
+ * lib/csv/csv.rb: Fixed header writing bug reported by Dov Murik.
+ * lib/csv/csv.rb: Use custom separators in parsing header Strings as
+ suggested by Shmulik Regev.
+ * lib/csv/csv.rb: Added a :write_headers option for outputting headers.
+ * lib/csv/csv.rb: Handle open() calls in binary mode whenever we can to
+ workaround a Windows issue where line-ending translation can cause an
+ off-by-one error in seeking back to a non-zero starting position after
+ auto-discovery for :row_sep as suggested by Robert Battle.
+ * lib/csv/csv.rb: Improved the parser to fail faster when fed some forms
+ of invalid CSV that can be detected without reading ahead.
+ * lib/csv/csv.rb: Added a :field_size_limit option to control CSV's
+ lookahead and prevent the parser from biting off more data than
+ it can chew.
+ * lib/csv/csv.rb: Added readers for CSV attributes: col_sep(), row_sep(),
+ quote_char(), field_size_limit(), converters(), unconverted_fields?(),
+ headers(), return_headers?(), write_headers?(), header_converters(),
+ skip_blanks?(), and force_quotes?().
+ * lib/csv/csv.rb: Cleaned up code syntax to be more inline with
+ Ruby 1.9 than 1.8.
+
Sun Sep 21 07:43:16 2008 Tadayoshi Funaba <tadf@dotrb.org>
* complex.c: an instance method image has been removed and
diff --git a/lib/csv.rb b/lib/csv.rb
index f60d5b1cb..dccee6cbe 100644
--- a/lib/csv.rb
+++ b/lib/csv.rb
@@ -1,5 +1,5 @@
-#!/usr/local/bin/ruby -w
-
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# = csv.rb -- CSV Reading and Writing
#
# Created by James Edward Gray II on 2005-10-31.
@@ -37,6 +37,7 @@
#
# === CSV Parsing
#
+# * This parser is m17n aware. See CSV for full details.
# * This library has a stricter parser and will throw MalformedCSVErrors on
# problematic data.
# * This library has a less liberal idea of a line ending than CSV. What you
@@ -91,7 +92,6 @@
require "forwardable"
require "English"
-require "enumerator"
require "date"
require "stringio"
@@ -130,7 +130,7 @@ require "stringio"
#
# === To a File
#
-# CSV.open("path/to/file.csv", "w") do |csv|
+# CSV.open("path/to/file.csv", "wb") do |csv|
# csv << ["row", "of", "CSV", "data"]
# csv << ["another", "row"]
# # ...
@@ -155,9 +155,51 @@ require "stringio"
# CSV(csv = "") { |csv_str| csv_str << %w{my data here} } # to a String
# CSV($stderr) { |csv_err| csv_err << %w{my data here} } # to $stderr
#
+# == CSV and Character Encodings (M17n or Multilingualization)
+#
+# This new CSV parser is m17n savvy. The parser works in the Encoding of the IO
+# or String object being read from or written to. Your data is never transcoded
+# (unless you ask Ruby to transcode it for you) and will literally be parsed in
+# the Encoding it is in. Thus CSV will return Arrays or Rows of Strings in the
+# Encoding of your data. This is accomplished by transcoding the parser itself
+# into your Encoding.
+#
+# Some transcoding must take place, of course, to accomplish this multiencoding
+# support. For example, <tt>:col_sep</tt>, <tt>:row_sep</tt>, and
+# <tt>:quote_char</tt> must be transcoded to match your data. Hopefully this
+# makes the entire process feel transparent, since CSV's defaults should just
+# magically work for your data. However, you can set these values manually in
+# the target Encoding to avoid the translation.
+#
+# It's also important to note that while all of CSV's core parser is now
+# Encoding agnostic, some features are not. For example, the built-in
+# converters will try to transcode data to UTF-8 before making conversions.
+# Again, you can provide custom converters that are aware of your Encodings to
+# avoid this translation. It's just too hard for me to support native
+# conversions in all of Ruby's Encodings.
+#
+# Anyway, the practical side of this is simple: make sure IO and String objects
+# passed into CSV have the proper Encoding set and everything should just work.
+# CSV methods that allow you to open IO objects (CSV::foreach(), CSV::open(),
+# CSV::read(), and CSV::readlines()) do allow you to specify the Encoding.
+#
+# One minor exception comes when generating CSV into a String with an Encoding
+# that is not ASCII compatible. There's no existing data for CSV to use to
+# prepare itself and thus you will probably need to manually specify the desired
+# Encoding for most of those cases. It will try to guess using the fields in a
+# row of output though, when using CSV::generate_line() or Array#to_csv().
+#
+# I try to point out any other Encoding issues in the documentation of methods
+# as they come up.
+#
+# This has been tested to the best of my ability with all non-"dummy" Encodings
+# Ruby ships with. However, it is brave new code and may have some bugs.
+# Please feel free to {report}[mailto:james@grayproductions.net] any issues you
+# find with it.
+#
class CSV
# The version of the installed library.
- VERSION = "2.0.0".freeze
+ VERSION = "2.4.0".freeze
#
# A CSV::Row is part Array and part Hash. It retains an order for the fields
@@ -188,9 +230,9 @@ class CSV
# handle extra headers or fields
@row = if headers.size > fields.size
- headers.each_with_index.map { |header, i| [header, fields[i]] }
+ headers.zip(fields)
else
- fields.each_with_index.map { |field, i| [headers[i], field] }
+ fields.zip(headers).map { |pair| pair.reverse }
end
end
@@ -444,6 +486,17 @@ class CSV
fields.to_csv(options)
end
alias_method :to_s, :to_csv
+
+ # A summary of fields, by header, in an ASCII-8BIT String.
+ def inspect
+ str = ["#<", self.class.to_s]
+ each do |header, field|
+ str << " " << (header.is_a?(Symbol) ? header.to_s : header.inspect) <<
+ ":" << field.inspect
+ end
+ str << ">"
+ str.map { |s| s.encode("ASCII-8BIT") }.join
+ end
end
#
@@ -775,6 +828,11 @@ class CSV
end.join
end
alias_method :to_s, :to_csv
+
+ # Shows the mode and size of this table in a US-ASCII String.
+ def inspect
+ "#<#{self.class} mode:#{@mode} row_count:#{to_a.size}>"
+ end
end
# The error thrown when the parser encounters illegal CSV formatting.
@@ -799,6 +857,10 @@ class CSV
DateTimeMatcher =
/ \A(?: (\w+,?\s+)?\w+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2},?\s+\d{2,4} |
\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2} )\z /x
+
+ # The encoding used by all converters.
+ ConverterEncoding = Encoding.find("UTF-8")
+
#
# This Hash holds the built-in converters of CSV that can be accessed by name.
# You can select Converters with CSV.convert() or through the +options+ Hash
@@ -813,20 +875,38 @@ class CSV
# <b><tt>:all</tt></b>:: All built-in converters. A combination of
# <tt>:date_time</tt> and <tt>:numeric</tt>.
#
+ # All built-in converters transcode field data to UTF-8 before attempting a
+ # conversion. If your data cannot be transcoded to UTF-8 the conversion will
+ # fail and the field will remain unchanged.
+ #
# This Hash is intentionally left unfrozen and users should feel free to add
# values to it that can be accessed by all CSV objects.
#
# To add a combo field, the value should be an Array of names. Combo fields
# can be nested with other combo fields.
#
- Converters = { :integer => lambda { |f| Integer(f) rescue f },
- :float => lambda { |f| Float(f) rescue f },
+ Converters = { :integer => lambda { |f|
+ Integer(f.encode(ConverterEncoding)) rescue f
+ },
+ :float => lambda { |f|
+ Float(f.encode(ConverterEncoding)) rescue f
+ },
:numeric => [:integer, :float],
:date => lambda { |f|
- f =~ DateMatcher ? (Date.parse(f) rescue f) : f
+ begin
+ e = f.encode(ConverterEncoding)
+ e =~ DateMatcher ? Date.parse(e) : f
+ rescue # encoding conversion or date parse errors
+ f
+ end
},
:date_time => lambda { |f|
- f =~ DateTimeMatcher ? (DateTime.parse(f) rescue f) : f
+ begin
+ e = f.encode(ConverterEncoding)
+ e =~ DateTimeMatcher ? DateTime.parse(e) : f
+ rescue # encoding conversion or date parse errors
+ f
+ end
},
:all => [:date_time, :numeric] }
@@ -840,6 +920,10 @@ class CSV
# replaced with underscores, non-word characters
# are dropped, and finally to_sym() is called.
#
+ # All built-in header converters transcode header data to UTF-8 before
+ # attempting a conversion. If your data cannot be transcoded to UTF-8 the
+ # conversion will fail and the header will remain unchanged.
+ #
# This Hash is intentionally left unfrozen and users should feel free to add
# values to it that can be accessed by all CSV objects.
#
@@ -847,9 +931,10 @@ class CSV
# can be nested with other combo fields.
#
HeaderConverters = {
- :downcase => lambda { |h| h.downcase },
+ :downcase => lambda { |h| h.encode(ConverterEncoding).downcase },
:symbol => lambda { |h|
- h.downcase.tr(" ", "_").delete("^a-z0-9_").to_sym
+ h.encode(ConverterEncoding).
+ downcase.tr(" ", "_").delete("^a-z0-9_").to_sym
}
}
@@ -859,6 +944,7 @@ class CSV
# <b><tt>:col_sep</tt></b>:: <tt>","</tt>
# <b><tt>:row_sep</tt></b>:: <tt>:auto</tt>
# <b><tt>:quote_char</tt></b>:: <tt>'"'</tt>
+ # <b><tt>:field_size_limit</tt></b>:: +nil+
# <b><tt>:converters</tt></b>:: +nil+
# <b><tt>:unconverted_fields</tt></b>:: +nil+
# <b><tt>:headers</tt></b>:: +false+
@@ -870,6 +956,7 @@ class CSV
DEFAULT_OPTIONS = { :col_sep => ",",
:row_sep => :auto,
:quote_char => '"',
+ :field_size_limit => nil,
:converters => nil,
:unconverted_fields => nil,
:headers => false,
@@ -879,6 +966,31 @@ class CSV
:force_quotes => false }.freeze
#
+ # This method will return a CSV instance, just like CSV::new(), but the
+ # instance will be cached and returned for all future calls to this method for
+ # the same +data+ object (tested by Object#object_id()) with the same
+ # +options+.
+ #
+ # If a block is given, the instance is passed to the block and the return
+ # value becomes the return value of the block.
+ #
+ def self.instance(data = $stdout, options = Hash.new)
+ # create a _signature_ for this method call, data object and options
+ sig = [data.object_id] +
+ options.values_at(*DEFAULT_OPTIONS.keys.sort_by { |sym| sym.to_s })
+
+ # fetch or create the instance for this signature
+ @@instances ||= Hash.new
+ instance = (@@instances[sig] ||= new(data, options))
+
+ if block_given?
+ yield instance # run block, if given, returning result
+ else
+ instance # or return the instance
+ end
+ end
+
+ #
# This method allows you to serialize an Array of Ruby objects to a String or
# File of CSV data. This is not as powerful as Marshal or YAML, but perhaps
# useful for spreadsheet and database interaction.
@@ -959,6 +1071,53 @@ class CSV
end
#
+ # This method is the reading counterpart to CSV::dump(). See that method for
+ # a detailed description of the process.
+ #
+ # You can customize loading by adding a class method called csv_load() which
+ # will be passed a Hash of meta information, an Array of headers, and an Array
+ # of fields for the object the method is expected to return.
+ #
+ # Remember that all fields will be Strings after this load. If you need
+ # something else, use +options+ to setup converters or provide a custom
+ # csv_load() implementation.
+ #
+ def self.load(io_or_str, options = Hash.new)
+ csv = new(io_or_str, options)
+
+ # load meta information
+ meta = Hash[*csv.shift]
+ cls = meta["class".encode(csv.encoding)].split("::".encode(csv.encoding)).
+ inject(Object) do |c, const|
+ c.const_get(const)
+ end
+
+ # load headers
+ headers = csv.shift
+
+ # unserialize each object stored in the file
+ results = csv.inject(Array.new) do |all, row|
+ begin
+ obj = cls.csv_load(meta, headers, row)
+ rescue NoMethodError
+ obj = cls.allocate
+ headers.zip(row) do |name, value|
+ if name[0] == ?@
+ obj.instance_variable_set(name, value)
+ else
+ obj.send(name, value)
+ end
+ end
+ end
+ all << obj
+ end
+
+ csv.close unless io_or_str.is_a? String
+
+ results
+ end
+
+ #
# :call-seq:
# filter( options = Hash.new ) { |row| ... }
# filter( input, options = Hash.new ) { |row| ... }
@@ -1014,10 +1173,20 @@ class CSV
# pass a +path+ and any +options+ you wish to set for the read. Each row of
# file will be passed to the provided +block+ in turn.
#
- # The +options+ parameter can be anything CSV::new() understands.
+ # The +options+ parameter can be anything CSV::new() understands. This method
+ # also understands an additional <tt>:encoding</tt> parameter that you can use
+ # to specify the Encoding of the data in the file to be read. You must provide
+ # this unless your data is in Encoding::default_external(). CSV will use this
+ # to determine how to parse the data. You may provide a second Encoding to
+ # have the data transcoded as it is read. For example,
+ # <tt>:encoding => "UTF-32BE:UTF-8"</tt> would read UTF-32BE data from the
+ # file but transcode it to UTF-8 before CSV parses it.
#
def self.foreach(path, options = Hash.new, &block)
- open(path, options) do |csv|
+ encoding = options.delete(:encoding)
+ mode = "rb"
+ mode << ":#{encoding}" if encoding
+ open(path, mode, options) do |csv|
csv.each(&block)
end
end
@@ -1035,7 +1204,10 @@ class CSV
# Note that a passed String *is* modified by this method. Call dup() before
# passing if you need a new String.
#
- # The +options+ parameter can be anthing CSV::new() understands.
+ # The +options+ parameter can be anything CSV::new() understands. This method
+ # understands an additional <tt>:encoding</tt> parameter when not passed a
+ # String to set the base Encoding for the output. CSV needs this hint if you
+ # plan to output non-ASCII compatible data.
#
def self.generate(*args)
# add a default empty String, if none was given
@@ -1044,7 +1216,10 @@ class CSV
io.seek(0, IO::SEEK_END)
args.unshift(io)
else
- args.unshift("")
+ encoding = args.last.is_a?(Hash) ? args.last.delete(:encoding) : nil
+ str = ""
+ str.encode!(encoding) if encoding
+ args.unshift(str)
end
csv = new(*args) # wrap
yield csv # yield for appending
@@ -1055,97 +1230,40 @@ class CSV
# This method is a shortcut for converting a single row (Array) into a CSV
# String.
#
- # The +options+ parameter can be anthing CSV::new() understands.
+ # The +options+ parameter can be anything CSV::new() understands. This method
+ # understands an additional <tt>:encoding</tt> parameter to set the base
+ # Encoding for the output. This method will try to guess your Encoding from
+ # the first non-+nil+ field in +row+, if possible, but you may need to use
+ # this parameter as a backup plan.
#
# The <tt>:row_sep</tt> +option+ defaults to <tt>$INPUT_RECORD_SEPARATOR</tt>
# (<tt>$/</tt>) when calling this method.
#
def self.generate_line(row, options = Hash.new)
- options = {:row_sep => $INPUT_RECORD_SEPARATOR}.merge(options)
- (new("", options) << row).string
- end
-
- #
- # This method will return a CSV instance, just like CSV::new(), but the
- # instance will be cached and returned for all future calls to this method for
- # the same +data+ object (tested by Object#object_id()) with the same
- # +options+.
- #
- # If a block is given, the instance is passed to the block and the return
- # value becomes the return value of the block.
- #
- def self.instance(data = $stdout, options = Hash.new)
- # create a _signature_ for this method call, data object and options
- sig = [data.object_id] +
- options.values_at(*DEFAULT_OPTIONS.keys.sort_by { |sym| sym.to_s })
-
- # fetch or create the instance for this signature
- @@instances ||= Hash.new
- instance = (@@instances[sig] ||= new(data, options))
-
- if block_given?
- yield instance # run block, if given, returning result
- else
- instance # or return the instance
- end
- end
-
- #
- # This method is the reading counterpart to CSV::dump(). See that method for
- # a detailed description of the process.
- #
- # You can customize loading by adding a class method called csv_load() which
- # will be passed a Hash of meta information, an Array of headers, and an Array
- # of fields for the object the method is expected to return.
- #
- # Remember that all fields will be Strings after this load. If you need
- # something else, use +options+ to setup converters or provide a custom
- # csv_load() implementation.
- #
- def self.load(io_or_str, options = Hash.new)
- csv = new(io_or_str, options)
-
- # load meta information
- meta = Hash[*csv.shift]
- cls = meta["class"].split("::").inject(Object) do |c, const|
- c.const_get(const)
- end
-
- # load headers
- headers = csv.shift
-
- # unserialize each object stored in the file
- results = csv.inject(Array.new) do |all, row|
- begin
- obj = cls.csv_load(meta, headers, row)
- rescue NoMethodError
- obj = cls.allocate
- headers.zip(row) do |name, value|
- if name[0] == ?@
- obj.instance_variable_set(name, value)
- else
- obj.send(name, value)
- end
- end
- end
- all << obj
- end
-
- csv.close unless io_or_str.is_a? String
-
- results
+ options = {:row_sep => $INPUT_RECORD_SEPARATOR}.merge(options)
+ encoding = options.delete(:encoding)
+ str = ""
+ if encoding
+ str.encode!(encoding)
+ elsif field = row.find { |f| not f.nil? }
+ str.encode!(String(field).encoding)
+ end
+ (new(str, options) << row).string
end
#
# :call-seq:
- # open( filename, mode="r", options = Hash.new ) { |csv| ... }
- # open( filename, mode="r", options = Hash.new )
+ # open( filename, mode = "rb", options = Hash.new ) { |csv| ... }
+ # open( filename, options = Hash.new ) { |csv| ... }
+ # open( filename, mode = "rb", options = Hash.new )
+ # open( filename, options = Hash.new )
#
# This method opens an IO object, and wraps that with CSV. This is intended
# as the primary interface for writing a CSV file.
#
- # You may pass any +args+ Ruby's open() understands followed by an optional
- # Hash containing any +options+ CSV::new() understands.
+ # You must pass a +filename+ and may optionally add a +mode+ for Ruby's
+ # open(). You may also pass an optional Hash containing any +options+
+ # CSV::new() understands as the final argument.
#
# This method works like Ruby's open() call, in that it will pass a CSV object
# to a provided block and close it when the block terminates, or it will
@@ -1153,24 +1271,38 @@ class CSV
# from the Ruby 1.8 CSV library which passed rows to the block. Use
# CSV::foreach() for that behavior.)
#
- # An opened CSV object will delegate to many IO methods, for convenience. You
+ # You must provide a +mode+ with an embedded Encoding designator unless your
+ # data is in Encoding::default_external(). CSV will check the Encoding of the
+ # underlying IO object (set by the +mode+ you pass) to determine how to parse
+ # the data. You may provide a second Encoding to have the data transcoded as
+ # it is read just as you can with a normal call to IO::open(). For example,
+ # <tt>"rb:UTF-32BE:UTF-8"</tt> would read UTF-32BE data from the file but
+ # transcode it to UTF-8 before CSV parses it.
+ #
+ # An opened CSV object will delegate to many IO methods for convenience. You
# may call:
#
# * binmode()
+ # * binmode?()
# * close()
# * close_read()
# * close_write()
# * closed?()
# * eof()
# * eof?()
+ # * external_encoding()
# * fcntl()
# * fileno()
+ # * flock()
# * flush()
# * fsync()
+ # * internal_encoding()
# * ioctl()
# * isatty()
+ # * path()
# * pid()
# * pos()
+ # * pos=()
# * reopen()
# * seek()
# * stat()
@@ -1179,11 +1311,14 @@ class CSV
# * tell()
# * to_i()
# * to_io()
+ # * truncate()
# * tty?()
#
def self.open(*args)
# find the +options+ Hash
options = if args.last.is_a? Hash then args.pop else Hash.new end
+ # default to a binary open mode
+ args << "rb" if args.size == 1
# wrap a File opened with the remaining +args+
csv = new(File.open(*args), options)
@@ -1237,10 +1372,20 @@ class CSV
#
# Use to slurp a CSV file into an Array of Arrays. Pass the +path+ to the
- # file and any +options+ CSV::new() understands.
+ # file and any +options+ CSV::new() understands. This method also understands
+ # an additional <tt>:encoding</tt> parameter that you can use to specify the
+ # Encoding of the data in the file to be read. You must provide this unless
+ # your data is in Encoding::default_external(). CSV will use this to determine
+ # how to parse the data. You may provide a second Encoding to have the data
+ # transcoded as it is read. For example,
+ # <tt>:encoding => "UTF-32BE:UTF-8"</tt> would read UTF-32BE data from the
+ # file but transcode it to UTF-8 before CSV parses it.
#
def self.read(path, options = Hash.new)
- open(path, options) { |csv| csv.read }
+ encoding = options.delete(:encoding)
+ mode = "rb"
+ mode << ":#{encoding}" if encoding
+ open(path, mode, options) { |csv| csv.read }
end
# Alias for CSV::read().
@@ -1276,6 +1421,8 @@ class CSV
# Available options are:
#
# <b><tt>:col_sep</tt></b>:: The String placed between each field.
+ # This String will be transcoded into
+ # the data's Encoding before parsing.
# <b><tt>:row_sep</tt></b>:: The String appended to the end of each
# row. This can be set to the special
# <tt>:auto</tt> setting, which requests
@@ -1295,7 +1442,16 @@ class CSV
# <tt>$INPUT_RECORD_SEPARATOR</tt>
# (<tt>$/</tt>) is used. Obviously,
# discovery takes a little time. Set
- # manually if speed is important.
+ # manually if speed is important. Also
+ # note that IO objects should be opened
+ # in binary mode on Windows if this
+ # feature will be used as the
+ # line-ending translation can cause
+ # problems with resetting the document
+ # position to where it was before the
+ # read ahead. This String will be
+ # transcoded into the data's Encoding
+ # before parsing.
# <b><tt>:quote_char</tt></b>:: The character used to quote fields.
# This has to be a single character
# String. This is useful for
@@ -1304,11 +1460,31 @@ class CSV
# instead of the correct <tt>"</tt>.
# CSV will always consider a double
# sequence this character to be an
- # escaped quote.
+ # escaped quote. This String will be
+ # transcoded into the data's Encoding
+ # before parsing.
+ # <b><tt>:field_size_limit</tt></b>:: This is a maximum size CSV will read
+ # ahead looking for the closing quote
+ # for a field. (In truth, it reads to
+ # the first line ending beyond this
+ # size.) If a quote cannot be found
+ # within the limit CSV will raise a
+ # MalformedCSVError, assuming the data
+ # is faulty. You can use this limit to
+ # prevent what are effectively DoS
+ # attacks on the parser. However, this
+ # limit can cause a legitimate parse to
+ # fail and thus is set to +nil+, or off,
+ # by default.
# <b><tt>:converters</tt></b>:: An Array of names from the Converters
# Hash and/or lambdas that handle custom
# conversion. A single converter
- # doesn't have to be in an Array.
+ # doesn't have to be in an Array. All
+ # built-in converters try to transcode
+ # fields to UTF-8 before converting.
+ # The conversion will fail if the data
+ # cannot be transcoded, leaving the
+ # field unchanged.
# <b><tt>:unconverted_fields</tt></b>:: If set to +true+, an
# unconverted_fields() method will be
# added to all returned rows (Array or
@@ -1324,11 +1500,14 @@ class CSV
# headers. If set to an Array, the
# contents will be used as the headers.
# If set to a String, the String is run
- # through a call of CSV::parse_line() to
- # produce an Array of headers. This
- # setting causes CSV.shift() to return
+ # through a call of CSV::parse_line()
+ # with the same <tt>:col_sep</tt>,
+ # <tt>:row_sep</tt>, and
+ # <tt>:quote_char</tt> as this instance
+ # to produce an Array of headers. This
+ # setting causes CSV#shift() to return
# rows as CSV::Row objects instead of
- # Arrays and CSV.read() to return
+ # Arrays and CSV#read() to return
# CSV::Table objects instead of an Array
# of Arrays.
# <b><tt>:return_headers</tt></b>:: When +false+, header rows are silently
@@ -1337,10 +1516,17 @@ class CSV
# with identical headers and
# fields (save that the fields do not go
# through the converters).
+ # <b><tt>:write_headers</tt></b>:: When +true+ and <tt>:headers</tt> is
+ # set, a header row will be added to the
+ # output.
# <b><tt>:header_converters</tt></b>:: Identical in functionality to
# <tt>:converters</tt> save that the
# conversions are only made to header
- # rows.
+ # rows. All built-in converters try to
+ # transcode headers to UTF-8 before
+ # converting. The conversion will fail
+ # if the data cannot be transcoded,
+ # leaving the header unchanged.
# <b><tt>:skip_blanks</tt></b>:: When set to a +true+ value, CSV will
# skip over any rows with no content.
# <b><tt>:force_quotes</tt></b>:: When set to a +true+ value, CSV will
@@ -1356,8 +1542,24 @@ class CSV
options = DEFAULT_OPTIONS.merge(options)
# create the IO object we will read from
- @io = if data.is_a? String then StringIO.new(data) else data end
-
+ @io = if data.is_a? String then StringIO.new(data) else data end
+ # honor the IO encoding if we can, otherwise default to ASCII-8BIT
+ @encoding = if @io.respond_to? :internal_encoding
+ @io.internal_encoding || @io.external_encoding
+ elsif @io.is_a? StringIO
+ @io.string.encoding
+ end
+ @encoding ||= Encoding.default_external
+ #
+ # prepare for build safe regular expressions in the target encoding,
+ # if we can transcode the needed characters
+ #
+ @re_esc = "\\".encode(@encoding) rescue ""
+ @re_chars = %w[ \\ . [ ] - ^ $ ?
+ * + { } ( ) | #
+ \ \r \n \t \f \v ].
+ map { |s| s.encode(@encoding) rescue nil }.compact
+
init_separators(options)
init_parsers(options)
init_converters(options)
@@ -1372,6 +1574,79 @@ class CSV
end
#
+ # The encoded <tt>:col_sep</tt> used in parsing and writing. See CSV::new
+ # for details.
+ #
+ attr_reader :col_sep
+ #
+ # The encoded <tt>:row_sep</tt> used in parsing and writing. See CSV::new
+ # for details.
+ #
+ attr_reader :row_sep
+ #
+ # The encoded <tt>:quote_char</tt> used in parsing and writing. See CSV::new
+ # for details.
+ #
+ attr_reader :quote_char
+ # The limit for field size, if any. See CSV::new for details.
+ attr_reader :field_size_limit
+ #
+ # Returns the current list of converters in effect. See CSV::new for details.
+ # Built-in converters will be returned by name, while others will be returned
+ # as is.
+ #
+ def converters
+ @converters.map do |converter|
+ name = Converters.rassoc(converter)
+ name ? name.first : converter
+ end
+ end
+ #
+ # Returns +true+ if unconverted_fields() will be added to parsed results. See
+ # CSV::new for details.
+ # for details.
+ #
+ def unconverted_fields?() @unconverted_fields end
+ #
+ # Returns +nil+ if headers will not be used, +true+ if they will but have not
+ # yet been read, or the actual headers after they have been read. See
+ # CSV::new for details.
+ #
+ def headers
+ @headers || true if @use_headers
+ end
+ #
+ # Returns +true+ if headers will be returned as a row of results.
+ # See CSV::new for details.
+ #
+ def return_headers?() @return_headers end
+ # Returns +true+ if headers are written in output. See CSV::new for details.
+ def write_headers?() @write_headers end
+ #
+ # Returns the current list of converters in effect for headers. See CSV::new
+ # for details. Built-in converters will be returned by name, while others
+ # will be returned as is.
+ #
+ def header_converters
+ @header_converters.map do |converter|
+ name = HeaderConverters.rassoc(converter)
+ name ? name.first : converter
+ end
+ end
+ #
+ # Returns +true+ if blank lines are skipped by the parser. See CSV::new
+ # for details.
+ #
+ def skip_blanks?() @skip_blanks end
+ # Returns +true+ if all output fields are quoted. See CSV::new for details.
+ def force_quotes?() @force_quotes end
+
+ #
+ # The Encoding CSV is parsing or writing in. This will be the Encoding you
+ # receive parsed data in and/or the Encoding data will be written in.
+ #
+ attr_reader :encoding
+
+ #
# The line number of the last row read from this file. Fields with nested
# line-end characters will not affect this count.
#
@@ -1380,10 +1655,12 @@ class CSV
### IO and StringIO Delegation ###
extend Forwardable
- def_delegators :@io, :binmode, :close, :close_read, :close_write, :closed?,
- :eof, :eof?, :fcntl, :fileno, :flush, :fsync, :ioctl,
- :isatty, :pid, :pos, :reopen, :seek, :stat, :string,
- :sync, :sync=, :tell, :to_i, :to_io, :tty?
+ def_delegators :@io, :binmode, :binmode?, :close, :close_read, :close_write,
+ :closed?, :eof, :eof?, :external_encoding, :fcntl,
+ :fileno, :flock, :flush, :fsync, :internal_encoding,
+ :ioctl, :isatty, :path, :pid, :pos, :pos=, :reopen,
+ :seek, :stat, :string, :sync, :sync=, :tell, :to_i,
+ :to_io, :truncate, :tty?
# Rewinds the underlying IO object and resets CSV's lineno() counter.
def rewind
@@ -1403,12 +1680,18 @@ class CSV
# The data source must be open for writing.
#
def <<(row)
+ # make sure headers have been assigned
+ if header_row? and [Array, String].include? @use_headers.class
+ parse_headers # won't read data for Array or String
+ self << @headers if @write_headers
+ end
+
# handle CSV::Row objects and Hashes
row = case row
- when self.class::Row then row.fields
- when Hash then @headers.map { |header| row[header] }
- else row
- end
+ when self.class::Row then row.fields
+ when Hash then @headers.map { |header| row[header] }
+ else row
+ end
@headers = row if header_row?
@lineno += 1
@@ -1431,7 +1714,7 @@ class CSV
#
# If you provide a block that takes one argument, it will be passed the field
# and is expected to return the converted value or the field itself. If your
- # block takes two arguments, it will also be passed a FieldInfo Struct,
+ # block takes two arguments, it will also be passed a CSV::FieldInfo Struct,
# containing details about the field. Again, the block should return a
# converted field or the field itself.
#
@@ -1445,7 +1728,7 @@ class CSV
# header_convert { |field| ... }
# header_convert { |field, field_info| ... }
#
- # Identical to CSV.convert(), but for header rows.
+ # Identical to CSV#convert(), but for header rows.
#
# Note that this method must be called before header rows are read to have any
# effect.
@@ -1526,7 +1809,7 @@ class CSV
# add another read to the line
(line += @io.gets(@row_sep)) rescue return nil
# copy the line so we can chop it up in parsing
- parse = line.dup
+ parse = line.dup
parse.sub!(@parsers[:line_end], "")
#
@@ -1566,7 +1849,7 @@ class CSV
nil # for Ruby 1.8 CSV compatibility
else
# I decided to take a strict approach to CSV parsing...
- if $2.count("\r\n").zero? # verify correctness of field...
+ if $2.count(@parsers[:return_newline]).zero? # verify correctness
$2
else
# or throw an Exception
@@ -1603,6 +1886,10 @@ class CSV
# if we're not empty?() but at eof?(), a quoted field wasn't closed...
if @io.eof?
raise MalformedCSVError, "Unclosed quoted field on line #{lineno + 1}."
+ elsif parse =~ @parsers[:bad_field]
+ raise MalformedCSVError, "Illegal quoting on line #{lineno + 1}."
+ elsif @field_size_limit and parse.length >= @field_size_limit
+ raise MalformedCSVError, "Field size exceeded on line #{lineno + 1}."
end
# otherwise, we need to loop and pull some more data to complete the row
end
@@ -1610,6 +1897,38 @@ class CSV
alias_method :gets, :shift
alias_method :readline, :shift
+  #
+  # Returns a simplified description of the key CSV attributes in an
+  # ASCII-8BIT String.
+  #
+ def inspect
+ str = ["<#", self.class.to_s, " io_type:"]
+ # show type of wrapped IO
+ if @io == $stdout then str << "$stdout"
+ elsif @io == $stdin then str << "$stdin"
+ elsif @io == $stderr then str << "$stderr"
+ else str << @io.class.to_s
+ end
+ # show IO.path(), if available
+ if @io.respond_to?(:path) and (p = @io.path)
+ str << " io_path:" << p.inspect
+ end
+ # show encoding
+ str << " encoding:" << @encoding.name
+ # show other attributes
+ %w[ lineno col_sep row_sep
+ quote_char skip_blanks ].each do |attr_name|
+ if a = instance_variable_get("@#{attr_name}")
+ str << " " << attr_name << ":" << a.inspect
+ end
+ end
+ if @use_headers
+ str << " headers:" << headers.inspect
+ end
+ str << ">"
+ str.map { |s| s.encode("ASCII-8BIT") }.join
+ end
+
private
#
@@ -1624,15 +1943,18 @@ class CSV
#
def init_separators(options)
# store the selected separators
- @col_sep = options.delete(:col_sep)
- @row_sep = options.delete(:row_sep)
- @quote_char = options.delete(:quote_char)
+ @col_sep = options.delete(:col_sep).to_s.encode(@encoding)
+ @row_sep = options.delete(:row_sep) # encode after resolving :auto
+ @quote_char = options.delete(:quote_char).to_s.encode(@encoding)
if @quote_char.length != 1
raise ArgumentError, ":quote_char has to be a single character String"
end
+ #
# automatically discover row separator when requested
+ # (not fully encoding safe)
+ #
if @row_sep == :auto
if [ARGF, STDIN, STDOUT, STDERR].include?(@io) or
(defined?(Zlib) and @io.class == Zlib::GzipWriter)
@@ -1651,11 +1973,12 @@ class CSV
end
# read ahead a bit
- sample = @io.read(1024)
- sample += @io.read(1) if sample[-1..-1] == "\r" and not @io.eof?
+ sample = read_to_char(1024)
+ sample += read_to_char(1) if sample[-1..-1] == encode_str("\r") and
+ not @io.eof?
# try to find a standard separator
- if sample =~ /\r\n?|\n/
+ if sample =~ encode_re("\r\n?|\n")
@row_sep = $&
break
end
@@ -1673,14 +1996,17 @@ class CSV
end
end
end
+ @row_sep = @row_sep.to_s.encode(@encoding)
# establish quoting rules
- do_quote = lambda do |field|
+ @force_quotes = options.delete(:force_quotes)
+ do_quote = lambda do |field|
@quote_char +
String(field).gsub(@quote_char, @quote_char * 2) +
@quote_char
end
- @quote = if options.delete(:force_quotes)
+ quotable_chars = encode_str("\r\n", @col_sep, @quote_char)
+ @quote = if @force_quotes
do_quote
else
lambda do |field|
@@ -1690,7 +2016,7 @@ class CSV
field = String(field) # Stringify fields
# represent empty fields as empty quoted fields
if field.empty? or
- field.count("\r\n#{@col_sep}#{@quote_char}").nonzero?
+ field.count(quotable_chars).nonzero?
do_quote.call(field)
else
field # unquoted field
@@ -1703,27 +2029,45 @@ class CSV
# Pre-compiles parsers and stores them by name for access during reads.
def init_parsers(options)
# store the parser behaviors
- @skip_blanks = options.delete(:skip_blanks)
+ @skip_blanks = options.delete(:skip_blanks)
+ @field_size_limit = options.delete(:field_size_limit)
# prebuild Regexps for faster parsing
- esc_col_sep = Regexp.escape(@col_sep)
- esc_row_sep = Regexp.escape(@row_sep)
- esc_quote = Regexp.escape(@quote_char)
+ esc_col_sep = escape_re(@col_sep)
+ esc_row_sep = escape_re(@row_sep)
+ esc_quote = escape_re(@quote_char)
@parsers = {
- :leading_fields =>
- /\A(?:#{esc_col_sep})+/, # for empty leading fields
- :csv_row =>
- ### The Primary Parser ###
- / \G(?:^|#{esc_col_sep}) # anchor the match
- (?: #{esc_quote}( (?>[^#{esc_quote}]*) # find quoted fields
- (?> #{esc_quote*2}
- [^#{esc_quote}]* )* )#{esc_quote}
- | # ... or ...
- ([^#{esc_quote}#{esc_col_sep}]*) # unquoted fields
- )/x,
- ### End Primary Parser ###
- :line_end =>
- /#{esc_row_sep}\z/ # safer than chomp!()
+ # for empty leading fields
+ :leading_fields => encode_re("\\A(?:", esc_col_sep, ")+"),
+ # The Primary Parser
+ :csv_row => encode_re(
+ "\\G(?:\\A|", esc_col_sep, ")", # anchor the match
+ "(?:", esc_quote, # find quoted fields
+ "((?>[^", esc_quote, "]*)", # "unrolling the loop"
+ "(?>", esc_quote * 2, # double for escaping
+ "[^", esc_quote, "]*)*)",
+ esc_quote,
+ "|", # ... or ...
+ "([^", esc_quote, esc_col_sep, "]*))", # unquoted fields
+ "(?=", esc_col_sep, "|\\z)" # ensure field is ended
+ ),
+ # a test for unescaped quotes
+ :bad_field => encode_re(
+ "\\A", esc_col_sep, "?", # an optional comma
+ "(?:", esc_quote, # a quoted field
+ "(?>[^", esc_quote, "]*)", # "unrolling the loop"
+ "(?>", esc_quote * 2, # double for escaping
+ "[^", esc_quote, "]*)*",
+ esc_quote, # the closing quote
+ "[^", esc_quote, "]", # an extra character
+ "|", # ... or ...
+ "[^", esc_quote, esc_col_sep, "]+", # an unquoted field
+ esc_quote, ")" # an extra quote
+ ),
+ # safer than chomp!()
+ :line_end => encode_re(esc_row_sep, "\\z"),
+ # illegal unquoted characters
+ :return_newline => encode_str("\r\n")
}
end
@@ -1770,6 +2114,7 @@ class CSV
def init_headers(options)
@use_headers = options.delete(:headers)
@return_headers = options.delete(:return_headers)
+ @write_headers = options.delete(:write_headers)
# headers must be delayed until shift(), in case they need a row of content
@headers = nil
@@ -1812,7 +2157,7 @@ class CSV
# see if we are converting headers or fields
converters = headers ? @header_converters : @converters
- fields.each_with_index.map do |field, index| # map_with_index
+ fields.map.with_index do |field, index|
converters.each do |converter|
field = if converter.arity == 1 # straight field converter
converter[field]
@@ -1839,10 +2184,17 @@ class CSV
def parse_headers(row = nil)
if @headers.nil? # header row
@headers = case @use_headers # save headers
- when Array then @use_headers # Array of headers
- when String then self.class.parse_line(@use_headers) # CSV header String
- else row # first row headers
- end
+ # Array of headers
+ when Array then @use_headers
+ # CSV header String
+ when String
+ self.class.parse_line( @use_headers,
+ :col_sep => @col_sep,
+ :row_sep => @row_sep,
+ :quote_char => @quote_char )
+ # first row is headers
+ else row
+ end
# prepare converted and unconverted copies
row = @headers if row.nil?
@@ -1870,6 +2222,56 @@ class CSV
row.instance_eval { @unconverted_fields = fields }
row
end
+
+  #
+  # This method is an encoding safe version of Regexp::escape(). It will escape
+  # any characters that would change the meaning of a regular expression in the
+  # encoding of +str+. Regular expression characters that cannot be transcoded
+  # to the target encoding will be skipped and no escaping will be performed if
+  # a backslash cannot be transcoded.
+  #
+ def escape_re(str)
+ str.chars.map { |c| @re_chars.include?(c) ? @re_esc + c : c }.join
+ end
+
+ #
+ # Builds a regular expression in <tt>@encoding</tt>. All +chunks+ will be
+ # transcoded to that encoding.
+ #
+ def encode_re(*chunks)
+ Regexp.new(encode_str(*chunks))
+ end
+
+ #
+ # Builds a String in <tt>@encoding</tt>. All +chunks+ will be transcoded to
+ # that encoding.
+ #
+ def encode_str(*chunks)
+ chunks.map { |chunk| chunk.encode(@encoding.name) }.join
+ end
+
+  #
+  # Reads at least +bytes+ from <tt>@io</tt>, but will read on until the data
+  # read is valid in the encoding of that data. This should ensure that it is
+  # safe to use regular expressions on the read data. The read data will be
+  # returned in <tt>@encoding</tt>.
+  #
+ def read_to_char(bytes)
+ return "" if @io.eof?
+ data = @io.read(bytes)
+ begin
+ encoded = encode_str(data)
+ raise unless encoded.valid_encoding?
+ return encoded
+ rescue # encoding error or my invalid data raise
+ if @io.eof?
+ return data
+ else
+ data += @io.read(1) until data.valid_encoding? or @io.eof?
+ retry
+ end
+ end
+ end
end
# Another name for CSV::instance().
diff --git a/test/csv/tc_csv_parsing.rb b/test/csv/tc_csv_parsing.rb
index 965af929f..635ae9053 100644
--- a/test/csv/tc_csv_parsing.rb
+++ b/test/csv/tc_csv_parsing.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_csv_parsing.rb
#
@@ -7,6 +8,7 @@
# under the terms of Ruby's license.
require "test/unit"
+require "timeout"
require "csv"
@@ -17,6 +19,8 @@ require "csv"
# separator <tt>$/</tt>.
#
class TestCSVParsing < Test::Unit::TestCase
+ BIG_DATA = "123456789\n" * 1024
+
def test_mastering_regex_example
ex = %Q{Ten Thousand,10000, 2710 ,,"10,000","It's ""10 Grand"", baby",10K}
assert_equal( [ "Ten Thousand", "10000", " 2710 ", nil, "10,000",
@@ -158,7 +162,31 @@ class TestCSVParsing < Test::Unit::TestCase
assert_send([csv.lineno, :<, 4])
end
rescue CSV::MalformedCSVError
- assert_equal("Unclosed quoted field on line 4.", $!.message)
+ assert_equal("Illegal quoting on line 4.", $!.message)
+ end
+ end
+
+ def test_the_parse_fails_fast_when_it_can_for_unquoted_fields
+ assert_parse_errors_out('valid,fields,bad start"' + BIG_DATA)
+ end
+
+ def test_the_parse_fails_fast_when_it_can_for_unescaped_quotes
+ assert_parse_errors_out('valid,fields,"bad start"unescaped' + BIG_DATA)
+ end
+
+ def test_field_size_limit_controls_lookahead
+ assert_parse_errors_out( 'valid,fields,"' + BIG_DATA + '"',
+ :field_size_limit => 2048 )
+ end
+
+ private
+
+ def assert_parse_errors_out(*args)
+ assert_raise(CSV::MalformedCSVError) do
+ Timeout.timeout(0.2) do
+ CSV.parse(*args)
+ fail("Parse didn't error out")
+ end
end
end
end
diff --git a/test/csv/tc_csv_writing.rb b/test/csv/tc_csv_writing.rb
index 467728430..a1ce4de97 100644
--- a/test/csv/tc_csv_writing.rb
+++ b/test/csv/tc_csv_writing.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_csv_writing.rb
#
diff --git a/test/csv/tc_data_converters.rb b/test/csv/tc_data_converters.rb
index 24c6b6b76..acf27a666 100644
--- a/test/csv/tc_data_converters.rb
+++ b/test/csv/tc_data_converters.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_data_converters.rb
#
diff --git a/test/csv/tc_encodings.rb b/test/csv/tc_encodings.rb
new file mode 100644
index 000000000..c773ec9cb
--- /dev/null
+++ b/test/csv/tc_encodings.rb
@@ -0,0 +1,255 @@
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
+
+# tc_encodings.rb
+#
+# Created by James Edward Gray II on 2008-09-13.
+# Copyright 2008 James Edward Gray II. You can redistribute or modify this code
+# under the terms of Ruby's license.
+
+require "test/unit"
+
+require "csv"
+
+class TestEncodings < Test::Unit::TestCase
+ def setup
+ @temp_csv_path = File.join(File.dirname(__FILE__), "temp.csv")
+ end
+
+ def teardown
+ File.unlink(@temp_csv_path) if File.exist? @temp_csv_path
+ end
+
+ ########################################
+ ### Hand Test Some Popular Encodings ###
+ ########################################
+
+ def test_parses_utf8_encoding
+ assert_parses( [ %w[ one two … ],
+ %w[ 1 … 3 ],
+ %w[ … 5 6 ] ], "UTF-8" )
+ end
+
+ def test_parses_latin1_encoding
+ assert_parses( [ %w[ one two Résumé ],
+ %w[ 1 Résumé 3 ],
+ %w[ Résumé 5 6 ] ], "ISO-8859-1" )
+ end
+
+ def test_parses_utf16be_encoding
+ assert_parses( [ %w[ one two … ],
+ %w[ 1 … 3 ],
+ %w[ … 5 6 ] ], "UTF-16BE" )
+ end
+
+ def test_parses_shift_jis_encoding
+ assert_parses( [ %w[ 一 二 三 ],
+ %w[ 四 五 六 ],
+ %w[ 七 八 九 ] ], "Shift_JIS" )
+ end
+
+ ###########################################################
+ ### Try Simple Reading for All Non-dummy Ruby Encodings ###
+ ###########################################################
+
+ def test_reading_with_most_encodings
+ each_encoding do |encoding|
+ begin
+ assert_parses( [ %w[ abc def ],
+ %w[ ghi jkl ] ], encoding )
+ rescue Encoding::NoConverterError
+ fail("Failed to support #{encoding.name}.")
+ end
+ end
+ end
+
+ def test_regular_expression_escaping
+ each_encoding do |encoding|
+ begin
+ assert_parses( [ %w[ abc def ],
+ %w[ ghi jkl ] ], encoding, :col_sep => "|" )
+ rescue Encoding::NoConverterError
+ fail("Failed to properly escape #{encoding.name}.")
+ end
+ end
+ end
+
+ #######################################################################
+ ### Stress Test ASCII Compatible and Non-ASCII Compatible Encodings ###
+ #######################################################################
+
+ def test_auto_line_ending_detection
+ # arrange data to place a \r at the end of CSV's read ahead point
+ encode_for_tests([["a" * 509]], :row_sep => "\r\n") do |data|
+ assert_equal("\r\n".encode(data.encoding), CSV.new(data).row_sep)
+ end
+ end
+
+ def test_csv_chars_are_transcoded
+ encode_for_tests([%w[abc def]]) do |data|
+ %w[col_sep row_sep quote_char].each do |csv_char|
+ assert_equal( "|".encode(data.encoding),
+ CSV.new(data, csv_char.to_sym => "|").send(csv_char) )
+ end
+ end
+ end
+
+ def test_parser_works_with_encoded_headers
+ encode_for_tests([%w[one two three], %w[1 2 3]]) do |data|
+ parsed = CSV.parse(data, :headers => true)
+ assert( parsed.headers.all? { |h| h.encoding == data.encoding },
+ "Wrong data encoding." )
+ parsed.each do |row|
+ assert( row.fields.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ end
+ end
+ end
+
+ def test_built_in_converters_transcode_to_utf_8_then_convert
+ encode_for_tests([%w[one two three], %w[1 2 3]]) do |data|
+ parsed = CSV.parse(data, :converters => :integer)
+ assert( parsed[0].all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ assert_equal([1, 2, 3], parsed[1])
+ end
+ end
+
+ def test_built_in_header_converters_transcode_to_utf_8_then_convert
+ encode_for_tests([%w[one two three], %w[1 2 3]]) do |data|
+ parsed = CSV.parse( data, :headers => true,
+ :header_converters => :downcase )
+ assert( parsed.headers.all? { |h| h.encoding.name == "UTF-8" },
+ "Wrong data encoding." )
+ assert( parsed[0].fields.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ end
+ end
+
+ def test_open_allows_you_to_set_encodings
+ encode_for_tests([%w[abc def]]) do |data|
+ # read and write in encoding
+ File.open(@temp_csv_path, "wb:#{data.encoding.name}") { |f| f << data }
+ CSV.open(@temp_csv_path, "rb:#{data.encoding.name}") do |csv|
+ csv.each do |row|
+ assert( row.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ end
+ end
+
+ # read and write with transcoding
+ File.open(@temp_csv_path, "wb:UTF-32BE:#{data.encoding.name}") do |f|
+ f << data
+ end
+ CSV.open(@temp_csv_path, "rb:UTF-32BE:#{data.encoding.name}") do |csv|
+ csv.each do |row|
+ assert( row.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ end
+ end
+ end
+ end
+
+ def test_foreach_allows_you_to_set_encodings
+ encode_for_tests([%w[abc def]]) do |data|
+ # read and write in encoding
+ File.open(@temp_csv_path, "wb:#{data.encoding.name}") { |f| f << data }
+ CSV.foreach(@temp_csv_path, :encoding => data.encoding.name) do |row|
+ assert( row.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ end
+
+ # read and write with transcoding
+ File.open(@temp_csv_path, "wb:UTF-32BE:#{data.encoding.name}") do |f|
+ f << data
+ end
+ CSV.foreach( @temp_csv_path,
+ :encoding => "UTF-32BE:#{data.encoding.name}" ) do |row|
+ assert( row.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ end
+ end
+ end
+
+ def test_read_allows_you_to_set_encodings
+ encode_for_tests([%w[abc def]]) do |data|
+ # read and write in encoding
+ File.open(@temp_csv_path, "wb:#{data.encoding.name}") { |f| f << data }
+ rows = CSV.read(@temp_csv_path, :encoding => data.encoding.name)
+ assert( rows.flatten.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+
+ # read and write with transcoding
+ File.open(@temp_csv_path, "wb:UTF-32BE:#{data.encoding.name}") do |f|
+ f << data
+ end
+ rows = CSV.read( @temp_csv_path,
+ :encoding => "UTF-32BE:#{data.encoding.name}" )
+ assert( rows.flatten.all? { |f| f.encoding == data.encoding },
+ "Wrong data encoding." )
+ end
+ end
+
+ #################################
+ ### Write CSV in any Encoding ###
+ #################################
+
+ def test_can_write_csv_in_any_encoding
+ each_encoding do |encoding|
+ # test generate_line with encoding hint
+ csv = %w[abc d|ef].map { |f| f.encode(encoding) }.
+ to_csv(:col_sep => "|", :encoding => encoding.name)
+ assert_equal(encoding, csv.encoding)
+
+ # test generate_line with encoding guessing from fields
+ csv = %w[abc d|ef].map { |f| f.encode(encoding) }.to_csv(:col_sep => "|")
+ assert_equal(encoding, csv.encoding)
+
+ # writing to files
+ data = encode_ary([%w[abc d,ef], %w[123 456 ]], encoding)
+ CSV.open(@temp_csv_path, "wb:#{encoding.name}") do |csv|
+ data.each { |row| csv << row }
+ end
+ assert_equal(data, CSV.read(@temp_csv_path, :encoding => encoding.name))
+ end
+ end
+
+ private
+
+ def assert_parses(fields, encoding, options = { })
+ encoding = Encoding.find(encoding) unless encoding.is_a? Encoding
+ fields = encode_ary(fields, encoding)
+ parsed = CSV.parse(ary_to_data(fields, options), options)
+ assert_equal(fields, parsed)
+ assert( parsed.flatten.all? { |field| field.encoding == encoding },
+ "Fields were transcoded." )
+ end
+
+ def encode_ary(ary, encoding)
+ ary.map { |row| row.map { |field| field.encode(encoding) } }
+ end
+
+ def ary_to_data(ary, options = { })
+ encoding = ary.flatten.first.encoding
+ quote_char = (options[:quote_char] || '"').encode(encoding)
+ col_sep = (options[:col_sep] || ",").encode(encoding)
+ row_sep = (options[:row_sep] || "\n").encode(encoding)
+ ary.map { |row|
+ row.map { |field|
+ [quote_char, field.encode(encoding), quote_char].join
+ }.join(col_sep) + row_sep
+ }.join.encode(encoding)
+ end
+
+ def encode_for_tests(data, options = { })
+ yield ary_to_data(encode_ary(data, "UTF-8"), options)
+ yield ary_to_data(encode_ary(data, "UTF-16BE"), options)
+ end
+
+ def each_encoding
+ Encoding.list.each do |encoding|
+ next if encoding.dummy? # skip "dummy" encodings
+ yield encoding
+ end
+ end
+end
diff --git a/test/csv/tc_features.rb b/test/csv/tc_features.rb
index ae5a8a451..ad6732a1e 100644
--- a/test/csv/tc_features.rb
+++ b/test/csv/tc_features.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_features.rb
#
@@ -67,18 +68,25 @@ class TestCSVFeatures < Test::Unit::TestCase
end
end
+ def test_csv_char_readers
+ %w[col_sep row_sep quote_char].each do |reader|
+ csv = CSV.new("abc,def", reader.to_sym => "|")
+ assert_equal("|", csv.send(reader))
+ end
+ end
+
def test_row_sep_auto_discovery
["\r\n", "\n", "\r"].each do |line_end|
data = "1,2,3#{line_end}4,5#{line_end}"
- discovered = CSV.new(data).instance_eval { @row_sep }
+ discovered = CSV.new(data).row_sep
assert_equal(line_end, discovered)
end
- assert_equal("\n", CSV.new("\n\r\n\r").instance_eval { @row_sep })
+ assert_equal("\n", CSV.new("\n\r\n\r").row_sep)
- assert_equal($/, CSV.new("").instance_eval { @row_sep })
+ assert_equal($/, CSV.new("").row_sep)
- assert_equal($/, CSV.new(STDERR).instance_eval { @row_sep })
+ assert_equal($/, CSV.new(STDERR).row_sep)
end
def test_lineno
@@ -117,6 +125,51 @@ class TestCSVFeatures < Test::Unit::TestCase
assert_equal(3, count)
end
+ def test_csv_behavior_readers
+ %w[ unconverted_fields return_headers write_headers
+ skip_blanks force_quotes ].each do |behavior|
+ assert( !CSV.new("abc,def").send("#{behavior}?"),
+ "Behavior defaulted to on." )
+ csv = CSV.new("abc,def", behavior.to_sym => true)
+ assert(csv.send("#{behavior}?"), "Behavior change now registered.")
+ end
+ end
+
+ def test_converters_reader
+ # no change
+ assert_equal( [:integer],
+ CSV.new("abc,def", :converters => [:integer]).converters )
+
+ # just one
+ assert_equal( [:integer],
+ CSV.new("abc,def", :converters => :integer).converters )
+
+ # expanded
+ assert_equal( [:integer, :float],
+ CSV.new("abc,def", :converters => :numeric).converters )
+
+ # custom
+ csv = CSV.new("abc,def", :converters => [:integer, lambda { }])
+ assert_equal(2, csv.converters.size)
+ assert_equal(:integer, csv.converters.first)
+ assert_instance_of(Proc, csv.converters.last)
+ end
+
+ def test_header_converters_reader
+ # no change
+ hc = :header_converters
+ assert_equal([:downcase], CSV.new("abc,def", hc => [:downcase]).send(hc))
+
+ # just one
+ assert_equal([:downcase], CSV.new("abc,def", hc => :downcase).send(hc))
+
+ # custom
+ csv = CSV.new("abc,def", hc => [:symbol, lambda { }])
+ assert_equal(2, csv.send(hc).size)
+ assert_equal(:symbol, csv.send(hc).first)
+ assert_instance_of(Proc, csv.send(hc).last)
+ end
+
# reported by Kev Jackson
def test_failing_to_escape_col_sep_bug_fix
assert_nothing_raised(Exception) { CSV.new(String.new, :col_sep => "|") }
@@ -149,7 +202,7 @@ class TestCSVFeatures < Test::Unit::TestCase
)
)
end
- assert_equal("\r\n", zipped.instance_eval { @row_sep })
+ assert_equal("\r\n", zipped.row_sep)
end
def test_gzip_writer_bug_fix
@@ -168,6 +221,41 @@ class TestCSVFeatures < Test::Unit::TestCase
File.unlink(file)
end
+ def test_inspect_is_smart_about_io_types
+ str = CSV.new("string,data").inspect
+ assert(str.include?("io_type:StringIO"), "IO type not detected.")
+
+ str = CSV.new($stderr).inspect
+ assert(str.include?("io_type:$stderr"), "IO type not detected.")
+
+ path = File.join(File.dirname(__FILE__), "temp.csv")
+ File.open(path, "w") { |csv| csv << "one,two,three\n1,2,3\n" }
+ str = CSV.open(path) { |csv| csv.inspect }
+ assert(str.include?("io_type:File"), "IO type not detected.")
+ File.unlink(path)
+ end
+
+ def test_inspect_shows_key_attributes
+ str = @csv.inspect
+ %w[lineno col_sep row_sep quote_char].each do |attr_name|
+ assert_match(/\b#{attr_name}:[^\s>]+/, str)
+ end
+ end
+
+ def test_inspect_shows_headers_when_available
+ CSV.new("one,two,three\n1,2,3\n", :headers => true) do |csv|
+ assert(csv.inspect.include?("headers:true"), "Header hint not shown.")
+ csv.shift # load headers
+ assert_match(/headers:\[[^\]]+\]/, csv.inspect)
+ end
+ end
+
+ def test_inspect_is_ascii_8bit_encoded
+ CSV.new("one,two,three\n1,2,3\n".encode("UTF-16BE")) do |csv|
+ assert_equal("ASCII-8BIT", csv.inspect.encoding.name)
+ end
+ end
+
def test_version
assert_not_nil(CSV::VERSION)
assert_instance_of(String, CSV::VERSION)
diff --git a/test/csv/tc_headers.rb b/test/csv/tc_headers.rb
index 74e2f54ad..e0f544dad 100644
--- a/test/csv/tc_headers.rb
+++ b/test/csv/tc_headers.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_headers.rb
#
@@ -129,6 +130,21 @@ class TestCSVHeaders < Test::Unit::TestCase
assert(!row.field_row?)
end
+ def test_csv_header_string_inherits_separators
+ # parse with custom col_sep
+ csv = nil
+ assert_nothing_raised(Exception) do
+ csv = CSV.parse( @data.tr(",", "|"), :col_sep => "|",
+ :headers => "my|new|headers" )
+ end
+
+ # verify headers were recognized
+ row = csv[0]
+ assert_not_nil(row)
+ assert_instance_of(CSV::Row, row)
+ assert_equal([%w{my first}, %w{new second}, %w{headers third}], row.to_a)
+ end
+
def test_return_headers
# activate headers and request they are returned
csv = nil
@@ -250,6 +266,17 @@ class TestCSVHeaders < Test::Unit::TestCase
end
end
+ def test_headers_reader
+ # no headers
+ assert_nil(CSV.new(@data).headers)
+
+ # headers
+ csv = CSV.new(@data, :headers => true)
+ assert_equal(true, csv.headers) # before headers are read
+ csv.shift # set headers
+ assert_equal(%w[first second third], csv.headers) # after headers are read
+ end
+
def test_blank_row_bug_fix
@data += "\n#{@data}" # add a blank row
diff --git a/test/csv/tc_interface.rb b/test/csv/tc_interface.rb
index e8cc920f9..9cacc28b0 100644
--- a/test/csv/tc_interface.rb
+++ b/test/csv/tc_interface.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_interface.rb
#
@@ -42,8 +43,9 @@ class TestCSVInterface < Test::Unit::TestCase
csv.close
assert(csv.closed?)
- ret = CSV.open(@path) do |csv|
- assert_instance_of(CSV, csv)
+ ret = CSV.open(@path) do |new_csv|
+ csv = new_csv
+ assert_instance_of(CSV, new_csv)
"Return value."
end
assert(csv.closed?)
@@ -161,7 +163,6 @@ class TestCSVInterface < Test::Unit::TestCase
lines = [{:a => 1, :b => 2, :c => 3}, {:a => 4, :b => 5, :c => 6}]
CSV.open( @path, "w", :headers => true,
- :converters => :all,
:header_converters => :symbol ) do |csv|
csv << lines.first.keys
lines.each { |line| csv << line }
@@ -173,6 +174,74 @@ class TestCSVInterface < Test::Unit::TestCase
end
end
+ def test_write_hash_with_headers_array
+ File.unlink(@path)
+
+ lines = [{:a => 1, :b => 2, :c => 3}, {:a => 4, :b => 5, :c => 6}]
+ CSV.open(@path, "w", :headers => [:b, :a, :c]) do |csv|
+ lines.each { |line| csv << line }
+ end
+
+ # test writing fields in the correct order
+ File.open(@path, "r") do |f|
+ assert_equal("2,1,3", f.gets.strip)
+ assert_equal("5,4,6", f.gets.strip)
+ end
+
+ # test reading CSV with headers
+ CSV.open( @path, "r", :headers => [:b, :a, :c],
+ :converters => :all ) do |csv|
+ csv.each { |line| assert_equal(lines.shift, line.to_hash) }
+ end
+ end
+
+ def test_write_hash_with_headers_string
+ File.unlink(@path)
+
+ lines = [{"a" => 1, "b" => 2, "c" => 3}, {"a" => 4, "b" => 5, "c" => 6}]
+ CSV.open(@path, "w", :headers => "b|a|c", :col_sep => "|") do |csv|
+ lines.each { |line| csv << line }
+ end
+
+ # test writing fields in the correct order
+ File.open(@path, "r") do |f|
+ assert_equal("2|1|3", f.gets.strip)
+ assert_equal("5|4|6", f.gets.strip)
+ end
+
+ # test reading CSV with headers
+ CSV.open( @path, "r", :headers => "b|a|c",
+ :col_sep => "|",
+ :converters => :all ) do |csv|
+ csv.each { |line| assert_equal(lines.shift, line.to_hash) }
+ end
+ end
+
+ def test_write_headers
+ File.unlink(@path)
+
+ lines = [{"a" => 1, "b" => 2, "c" => 3}, {"a" => 4, "b" => 5, "c" => 6}]
+ CSV.open( @path, "w", :headers => "b|a|c",
+ :write_headers => true,
+ :col_sep => "|" ) do |csv|
+ lines.each { |line| csv << line }
+ end
+
+ # test writing fields in the correct order
+ File.open(@path, "r") do |f|
+ assert_equal("b|a|c", f.gets.strip)
+ assert_equal("2|1|3", f.gets.strip)
+ assert_equal("5|4|6", f.gets.strip)
+ end
+
+ # test reading CSV with headers
+ CSV.open( @path, "r", :headers => true,
+ :col_sep => "|",
+ :converters => :all ) do |csv|
+ csv.each { |line| assert_equal(lines.shift, line.to_hash) }
+ end
+ end
+
def test_append # aliased add_row() and puts()
File.unlink(@path)
@@ -230,6 +299,6 @@ class TestCSVInterface < Test::Unit::TestCase
# shortcuts
assert_equal(STDOUT, CSV.instance.instance_eval { @io })
- assert_equal(STDOUT, CSV { |csv| csv.instance_eval { @io } })
+ assert_equal(STDOUT, CSV { |new_csv| new_csv.instance_eval { @io } })
end
end
diff --git a/test/csv/tc_row.rb b/test/csv/tc_row.rb
index a9b7f042b..3fa3784bb 100644
--- a/test/csv/tc_row.rb
+++ b/test/csv/tc_row.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_row.rb
#
@@ -286,4 +287,24 @@ class TestCSVRow < Test::Unit::TestCase
assert_equal([@row.headers.size, @row.fields.size].max, @row.size)
end
+
+ def test_inspect_shows_header_field_pairs
+ str = @row.inspect
+ @row.each do |header, field|
+ assert( str.include?("#{header.inspect}:#{field.inspect}"),
+ "Header field pair not found." )
+ end
+ end
+
+ def test_inspect_is_ascii_8bit_encoded
+ assert_equal("ASCII-8BIT", @row.inspect.encoding.name)
+ end
+
+ def test_inspect_shows_symbol_headers_as_bare_attributes
+ str = CSV::Row.new(@row.headers.map { |h| h.to_sym }, @row.fields).inspect
+ @row.each do |header, field|
+ assert( str.include?("#{header}:#{field.inspect}"),
+ "Header field pair not found." )
+ end
+ end
end
diff --git a/test/csv/tc_serialization.rb b/test/csv/tc_serialization.rb
index d9c37fde2..c8273bdb3 100644
--- a/test/csv/tc_serialization.rb
+++ b/test/csv/tc_serialization.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_serialization.rb
#
diff --git a/test/csv/tc_table.rb b/test/csv/tc_table.rb
index 028274d97..1e572d979 100644
--- a/test/csv/tc_table.rb
+++ b/test/csv/tc_table.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# tc_table.rb
#
@@ -389,4 +390,17 @@ class TestCSVTable < Test::Unit::TestCase
assert_equal(@rows.size, @table.size)
end
+
+ def test_inspect_shows_current_mode
+ str = @table.inspect
+ assert(str.include?("mode:#{@table.mode}"), "Mode not shown.")
+
+ @table.by_col!
+ str = @table.inspect
+ assert(str.include?("mode:#{@table.mode}"), "Mode not shown.")
+ end
+
+ def test_inspect_is_us_ascii_encoded
+ assert_equal("US-ASCII", @table.inspect.encoding.name)
+ end
end
diff --git a/test/csv/ts_all.rb b/test/csv/ts_all.rb
index c93052375..d380ab531 100644
--- a/test/csv/ts_all.rb
+++ b/test/csv/ts_all.rb
@@ -1,4 +1,5 @@
-#!/usr/local/bin/ruby -w
+#!/usr/bin/env ruby -w
+# encoding: UTF-8
# ts_all.rb
#
@@ -17,3 +18,4 @@ require "tc_row"
require "tc_table"
require "tc_headers"
require "tc_serialization"
+require "tc_encodings"