commit perl-DBD-CSV for openSUSE:Factory
Hello community, here is the log from the commit of package perl-DBD-CSV for openSUSE:Factory checked in at 2013-06-28 18:55:09 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/perl-DBD-CSV (Old) and /work/SRC/openSUSE:Factory/.perl-DBD-CSV.new (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Package is "perl-DBD-CSV" Changes: -------- --- /work/SRC/openSUSE:Factory/perl-DBD-CSV/perl-DBD-CSV.changes 2012-05-22 08:17:43.000000000 +0200 +++ /work/SRC/openSUSE:Factory/.perl-DBD-CSV.new/perl-DBD-CSV.changes 2013-06-28 18:55:11.000000000 +0200 @@ -1,0 +2,12 @@ +Mon Jun 17 07:54:57 UTC 2013 - coolo@suse.com + +- updated to 0.38 + * Fixed RT#80078, resulting in getline calls on undef (Benjamin Booth) + * Require latest DBI and SQL::Statement + * Updated copyright to 2013 + * Fixes for DBI-1.623 (Jens Rehsack) + * Add line/record number and position in error messages + * Improved documentation (including mje's contribution) + * Tested under 5.16.0 (installed) + +------------------------------------------------------------------- Old: ---- DBD-CSV-0.34.tgz New: ---- DBD-CSV-0.38.tgz ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ perl-DBD-CSV.spec ++++++ --- /var/tmp/diff_new_pack.etFZ6t/_old 2013-06-28 18:55:12.000000000 +0200 +++ /var/tmp/diff_new_pack.etFZ6t/_new 2013-06-28 18:55:12.000000000 +0200 @@ -1,7 +1,7 @@ # # spec file for package perl-DBD-CSV # -# Copyright (c) 2012 SUSE LINUX Products GmbH, Nuernberg, Germany. +# Copyright (c) 2013 SUSE LINUX Products GmbH, Nuernberg, Germany. # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed @@ -17,11 +17,11 @@ Name: perl-DBD-CSV -Version: 0.34 +Version: 0.38 Release: 0 %define cpan_name DBD-CSV Summary: DBI driver for CSV files -License: GPL-1.0+ or Artistic-1.0 +License: Artistic-1.0 or GPL-1.0+ Group: Development/Libraries/Perl Url: http://search.cpan.org/dist/DBD-CSV/ Source: http://www.cpan.org/authors/id/H/HM/HMBRAND/%{cpan_name}-%{version}.tgz @@ -29,17 +29,18 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-build BuildRequires: perl BuildRequires: perl-macros -BuildRequires: perl(DBD::File) >= 0.4 -BuildRequires: perl(DBI) >= 1.614 -BuildRequires: perl(SQL::Statement) >= 1.33 +BuildRequires: perl(DBD::File) >= 0.41 +BuildRequires: perl(DBI) >= 1.623 +BuildRequires: perl(SQL::Statement) >= 1.402 BuildRequires: perl(Test::More) >= 0.90 -BuildRequires: perl(Text::CSV_XS) >= 0.71 +BuildRequires: perl(Text::CSV_XS) >= 0.94 #BuildRequires: perl(DBD::CSV) -Requires: perl(DBD::File) >= 0.4 -Requires: perl(DBI) >= 1.614 -Requires: perl(SQL::Statement) >= 1.33 +Requires: perl(DBD::File) >= 0.41 +Requires: perl(DBI) >= 1.623 +Requires: perl(SQL::Statement) >= 1.402 Requires: perl(Test::More) >= 0.9 -Requires: perl(Text::CSV_XS) >= 0.71 +Requires: perl(Text::CSV_XS) >= 0.94 +Recommends: perl(Test::More) >= 0.98 %{perl_requires} %description ++++++ DBD-CSV-0.34.tgz -> DBD-CSV-0.38.tgz ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/ChangeLog new/DBD-CSV-0.38/ChangeLog --- old/DBD-CSV-0.34/ChangeLog 2012-05-12 16:50:07.000000000 +0200 +++ new/DBD-CSV-0.38/ChangeLog 2013-01-09 20:30:30.000000000 +0100 @@ -1,3 +1,19 @@ +0.38 - 2013-01-09, H.Merijn Brand + * + +0.37 - 2013-01-09, H.Merijn Brand + * Fixed RT#80078, resulting in getline calls on 
undef (Benjamin Booth) + * Require latest DBI and SQL::Statement + * Updated copyright to 2013 + * Fixes for DBI-1.623 (Jens Rehsack) + +0.36 - 2012-08-22, H.Merijn Brand + * Add line/record number and position in error messages + +0.35 - 2012-05-24, H.Merijn Brand + * Improved documentation (including mje's contribution) + * Tested under 5.16.0 (installed) + 0.34 - 2012-05-12, H.Merijn Brand * Updated copyright to 2012 * Require 5.8.1, as DBI does diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/MANIFEST new/DBD-CSV-0.38/MANIFEST --- old/DBD-CSV-0.34/MANIFEST 2012-05-12 16:54:56.000000000 +0200 +++ new/DBD-CSV-0.38/MANIFEST 2013-01-09 22:06:51.000000000 +0100 @@ -28,6 +28,7 @@ t/72_csv-schema.t t/73_csv-case.t t/80_rt.t +t/85_error.t t/lib.pl examples/passwd.pl META.yml Module YAML meta-data (added by MakeMaker) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/META.json new/DBD-CSV-0.38/META.json --- old/DBD-CSV-0.34/META.json 2012-05-12 16:54:57.000000000 +0200 +++ new/DBD-CSV-0.38/META.json 2013-01-09 22:06:51.000000000 +0100 @@ -1,55 +1,72 @@ { "resources" : { - "repository" : "http://repo.or.cz/w/DBD-CSV.git", - "license" : "http://dev.perl.org/licenses/" + "repository" : { + "web" : "http://repo.or.cz/w/DBD-CSV.git", + "url" : "http://repo.or.cz/r/DBD-CSV.git", + "type" : "git" + }, + "license" : [ + "http://dev.perl.org/licenses/" + ] }, - "installdirs" : "site", - "test_requires" : { - "Test::More" : "0.90", - "Test::Harness" : "0", - "charnames" : "0", - "Cwd" : "0", - "Encode" : "0" + "meta-spec" : { + "version" : "2", + "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec" }, - "distribution_type" : "module", + "generated_by" : "Author", + "version" : "0.38", + "name" : "DBD-CSV", + "x_installdirs" : "site", "author" : [ "Jochen Wiedmann", "Jeff Zucker", "H.Merijn Brand <h.m.brand@xs4all.nl>", - "Jens Rehsack" + "Jens Rehsack <rehsack@cpan.org>" ], - "requires" : { - "perl" : "5.008001", - "Text::CSV_XS" : "0.71", - "SQL::Statement" : "1.33", - "DBD::File" : "0.40", - "DBI" : "1.614" - }, - "recommends" : { - "Test::More" : "0.98", - "perl" : "5.014002", - "Text::CSV_XS" : "0.88", - "DBI" : "1.620" - }, - "generated_by" : "Author", - "meta-spec" : { - "version" : "2.0", - "url" : "https://metacpan.org/module/CPAN::Meta::Spec?#meta-spec" - }, - "version" : "0.34", - "name" : "DBD-CSV", - "license" : "perl", - "build_requires" : { - "Config" : "0" + "dynamic_config" : 1, + "license" : [ + "perl_5" + ], + "prereqs" : { + "test" : { + "requires" : { + "Test::Harness" : "0", + "Test::More" : "0.90", + "charnames" : "0", + "Encode" : "0", + "Cwd" : "0" + } + }, + "runtime" : { + "requires" : { + "perl" : "5.008001", + "Text::CSV_XS" : "0.94", + "SQL::Statement" : "1.402", + "DBI" : "1.623", + "DBD::File" : "0.41" + }, + "recommends" : { + "perl" : "5.016002", + "Test::More" : "0.98" + } + }, + "configure" : { + "requires" : { + "ExtUtils::MakeMaker" : "0" + } + }, + "build" : { + "requires" : { + "Config" : "0" + } + } }, "provides" : { "DBD::CSV" : { - "version" : "0.34", + "version" : "0.38", "file" : "lib/DBD/CSV.pm" } }, "abstract" : "DBI driver for CSV files", - "configure_requires" : { - "ExtUtils::MakeMaker" : "0" - } + "release_status" : "stable" } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/META.yml new/DBD-CSV-0.38/META.yml --- old/DBD-CSV-0.34/META.yml 2012-05-12 16:54:57.000000000 +0200 +++ 
new/DBD-CSV-0.38/META.yml 2013-01-09 22:06:51.000000000 +0100 @@ -1,44 +1,41 @@ ---- #YAML:1.0 -name: DBD-CSV -version: 0.34 -abstract: DBI driver for CSV files -license: perl -author: - - Jochen Wiedmann - - Jeff Zucker - - H.Merijn Brand <h.m.brand@xs4all.nl> - - Jens Rehsack -generated_by: Author -distribution_type: module -provides: - DBD::CSV: - file: lib/DBD/CSV.pm - version: 0.34 -requires: - perl: 5.008001 - DBI: 1.614 - DBD::File: 0.40 - SQL::Statement: 1.33 - Text::CSV_XS: 0.71 -configure_requires: - ExtUtils::MakeMaker: 0 -build_requires: - Config: 0 -test_requires: - Test::Harness: 0 - Test::More: 0.90 - Encode: 0 - Cwd: 0 - charnames: 0 -recommends: - perl: 5.014002 - DBI: 1.620 - Text::CSV_XS: 0.88 - Test::More: 0.98 -installdirs: site -resources: - license: http://dev.perl.org/licenses/ - repository: http://repo.or.cz/w/DBD-CSV.git -meta-spec: - version: 1.4 - url: http://module-build.sourceforge.net/META-spec-v1.4.html +--- +abstract: DBI driver for CSV files +author: + - Jochen Wiedmann + - Jeff Zucker + - H.Merijn Brand <h.m.brand@xs4all.nl> + - Jens Rehsack <rehsack@cpan.org> +build_requires: + Config: 0 + Cwd: 0 + Encode: 0 + Test::Harness: 0 + Test::More: '0.90' + charnames: 0 +configure_requires: + ExtUtils::MakeMaker: 0 +dynamic_config: 1 +generated_by: Author +license: perl +meta-spec: + url: http://module-build.sourceforge.net/META-spec-v1.4.html + version: '1.4' +name: DBD-CSV +provides: + DBD::CSV: + file: lib/DBD/CSV.pm + version: '0.38' +recommends: + Test::More: '0.98' + perl: '5.016002' +requires: + DBD::File: '0.41' + DBI: '1.623' + SQL::Statement: '1.402' + Text::CSV_XS: '0.94' + perl: '5.008001' +resources: + license: http://dev.perl.org/licenses/ + repository: http://repo.or.cz/r/DBD-CSV.git +version: '0.38' +x_installdirs: site diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/Makefile.PL new/DBD-CSV-0.38/Makefile.PL --- old/DBD-CSV-0.34/Makefile.PL 2012-01-11 09:01:43.000000000 +0100 +++ new/DBD-CSV-0.38/Makefile.PL 2013-01-09 22:06:32.000000000 +0100 @@ -1,6 +1,6 @@ # -*- perl -*- -# Copyright (c) 2009-2012 H.Merijn Brand +# Copyright (c) 2009-2013 H.Merijn Brand require 5.008001; @@ -33,10 +33,10 @@ AUTHOR => "H.Merijn Brand <h.merijn\@xs4all.nl>", VERSION_FROM => "lib/DBD/CSV.pm", PREREQ_PM => { - "DBI" => 1.614, - "DBD::File" => 0.40, - "Text::CSV_XS" => 0.71, - "SQL::Statement" => 1.33, + "DBI" => 1.623, + "DBD::File" => 0.41, + "Text::CSV_XS" => 0.94, + "SQL::Statement" => 1.402, "Test::More" => 0.90, "Encode" => 0, "charnames" => 0, @@ -48,6 +48,9 @@ valgrind.log ) }, + macro => { + TARFLAGS => "--format=ustar -c -v -f", + }, ); $ExtUtils::MakeMaker::VERSION > 6.30 and $wm{LICENSE} = "perl"; diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/README new/DBD-CSV-0.38/README --- old/DBD-CSV-0.34/README 2012-04-29 11:11:48.000000000 +0200 +++ new/DBD-CSV-0.38/README 2013-01-02 23:51:05.000000000 +0100 @@ -14,7 +14,7 @@ reflect code quality or stability. 
Copying - Copyright (C) 2009-2012 by H.Merijn Brand + Copyright (C) 2009-2013 by H.Merijn Brand Copyright (C) 2004-2009 by Jeff Zucker Copyright (C) 1998-2004 by Jochen Wiedmann diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/lib/Bundle/DBD/CSV.pm new/DBD-CSV-0.38/lib/Bundle/DBD/CSV.pm --- old/DBD-CSV-0.34/lib/Bundle/DBD/CSV.pm 2012-04-29 11:05:33.000000000 +0200 +++ new/DBD-CSV-0.38/lib/Bundle/DBD/CSV.pm 2013-01-09 20:31:00.000000000 +0100 @@ -5,7 +5,7 @@ use strict; use warnings; -our $VERSION = "1.05"; +our $VERSION = "1.07"; 1; @@ -21,15 +21,15 @@ =head1 CONTENTS -DBI 1.620 +DBI 1.623 -Text::CSV_XS 0.88 +Text::CSV_XS 0.94 -SQL::Statement 1.33 +SQL::Statement 1.402 -DBD::File 0.40 +DBD::File 0.41 -DBD::CSV 0.34 +DBD::CSV 0.38 =head1 DESCRIPTION @@ -44,7 +44,7 @@ =head1 COPYRIGHT AND LICENSE -Copyright (C) 2009-2012 by H.Merijn Brand +Copyright (C) 2009-2013 by H.Merijn Brand Copyright (C) 2004-2009 by Jeff Zucker Copyright (C) 1998-2004 by Jochen Wiedmann diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/lib/DBD/CSV.pm new/DBD-CSV-0.38/lib/DBD/CSV.pm --- old/DBD-CSV-0.34/lib/DBD/CSV.pm 2012-04-29 11:01:22.000000000 +0200 +++ new/DBD-CSV-0.38/lib/DBD/CSV.pm 2013-01-09 20:30:47.000000000 +0100 @@ -19,11 +19,12 @@ use strict; -use vars qw( @ISA $VERSION $drh $err $errstr $sqlstate ); +use vars qw( @ISA $VERSION $ATTRIBUTION $drh $err $errstr $sqlstate ); @ISA = qw( DBD::File ); -$VERSION = "0.34"; +$VERSION = "0.38"; +$ATTRIBUTION = "DBD::CSV $DBD::CSV::VERSION by H.Merijn Brand"; $err = 0; # holds error code for DBI::err $errstr = ""; # holds error string for DBI::errstr @@ -61,19 +62,16 @@ Text::CSV_XS::NV (), # SQL_DOUBLE ); -@DBD::CSV::dr::ISA = qw( DBD::File::dr ); +our @ISA = qw( DBD::File::dr ); -$DBD::CSV::dr::imp_data_size = 0; -$DBD::CSV::dr::data_sources_attr = undef; - -$DBD::CSV::ATTRIBUTION = "DBD::CSV $DBD::CSV::VERSION by H.Merijn Brand"; +our $imp_data_size = 0; +our $data_sources_attr = undef; sub connect { my ($drh, $dbname, $user, $auth, $attr) = @_; my $dbh = $drh->DBD::File::dr::connect ($dbname, $user, $auth, $attr); - $dbh->{f_meta} ||= {}; - $dbh->{Active} = 1; + $dbh->{Active} = 1; $dbh; } # connect @@ -83,9 +81,8 @@ use strict; -$DBD::CSV::db::imp_data_size = 0; - -@DBD::CSV::db::ISA = qw( DBD::File::db ); +our $imp_data_size = 0; +our @ISA = qw( DBD::File::db ); sub set_versions { @@ -94,31 +91,6 @@ return $this->SUPER::set_versions (); } # set_versions -if ($DBD::File::VERSION <= 0.38) { - # Map csv_tables to f_meta. 
- # Not absolutely needed, but otherwise I have to write two test suites - *STORE = sub { - my ($self, @attr) = @_; - @attr && $attr[0] eq "csv_tables" and $attr[0] = "f_meta"; - $self->SUPER::STORE (@attr); - }; # STORE - - *FETCH = sub { - my ($self, @attr) = @_; - @attr && $attr[0] eq "csv_tables" and $attr[0] = "f_meta"; - $self->SUPER::FETCH (@attr); - }; # FETCH - - *DBI::db::csv_versions = *csv_versions = sub { - join "\n", - "DBD::CSV $DBD::CSV::VERSION using Text::CSV_XS-$Text::CSV_XS::VERSION", - " DBD::File $DBD::File::VERSION", - "DBI $DBI::VERSION", - "OS $^O", - "Perl $]"; - }; # csv_versions - } - my %csv_xs_attr; sub init_valid_attributes @@ -172,148 +144,22 @@ use strict; -$DBD::CSV::st::imp_data_size = 0; - -@DBD::CSV::st::ISA = qw(DBD::File::st); - -$DBD::File::VERSION <= 0.38 and *FETCH = sub { - my ($sth, $attr) = @_; - - my ($struct, @coldefs, @colnames); - - # Being a bit dirty here, as SQL::Statement::Structure does not offer - # me an interface to the data I want - $struct = $sth->{f_stmt}{struct} || {}; - @coldefs = @{ $struct->{column_defs} || [] }; - @colnames = map { $_->{name} || $_->{value} } @coldefs; - - # dangerous: this accesses the table_defs information from last CREATE TABLE statement - $attr eq "TYPE" and # 12 = VARCHAR, TYPE should be numeric - return [ map { $struct->{table_defs}{columns}{$_}{data_type} || 12 } - @colnames ]; - - $attr eq "PRECISION" and - return [ map { $struct->{table_defs}{columns}{$_}{data_length} || 0 } - @colnames ]; - - $attr eq "NULLABLE" and - return [ map { ( grep m/^NOT NULL$/ => - @{ $struct->{table_defs}{columns}{$_}{constraints} || [] } - ) ? 0 : 1 } - @colnames ]; - - return $sth->SUPER::FETCH ($attr); - }; # FETCH +our $imp_data_size = 0; +our @ISA = qw(DBD::File::st); package DBD::CSV::Statement; use strict; -use DBD::File; use Carp; -@DBD::CSV::Statement::ISA = qw(DBD::File::Statement); - -# open_table (0 is used up to and including DBI-1.1611 -# Later versions use open_file (see DBD::CSV::Table) - -$DBD::File::VERSION <= 0.38 and *open_table = sub { - my ($self, $data, $table, $createMode, $lockMode) = @_; - - my $dbh = $data->{Database}; - my $tables = $dbh->{f_meta}; - $tables->{$table} ||= {}; - my $meta = $tables->{$table} || {}; - my $csv_in = $meta->{csv_in} || $dbh->{csv_csv_in}; - unless ($csv_in) { - my %opts = ( binary => 1, auto_diag => 1 ); - - # Allow specific Text::CSV_XS options - foreach my $key (grep m/^csv_/ => keys %$dbh) { - (my $attr = $key) =~ s/csv_//; - $attr =~ m{^(?: eol | sep | quote | escape # Handled below - | tables | sql_parser_object # Not for Text::CSV_XS - | sponge_driver | version # internal - )$}x and next; - $opts{$attr} = $dbh->{$key}; - } - delete $opts{null} and - $opts{blank_is_undef} = $opts{always_quote} = 1; - - my $class = $meta->{class} || $dbh->{csv_class} || "Text::CSV_XS"; - my $eol = $meta->{eol} || $dbh->{csv_eol} || "\r\n"; - $eol =~ m/^\A(?:[\r\n]|\r\n)\Z/ or $opts{eol} = $eol; - for ([ "sep", ',' ], - [ "quote", '"' ], - [ "escape", '"' ], - ) { - my ($attr, $def) = ($_->[0]."_char", $_->[1]); - $opts{$attr} = - exists $meta->{$attr} ? $meta->{$attr} : - exists $dbh->{"csv_$attr"} ? 
$dbh->{"csv_$attr"} : $def; - } - $meta->{csv_in} = $class->new (\%opts) or - $class->error_diag; - $opts{eol} = $eol; - $meta->{csv_out} = $class->new (\%opts) or - $class->error_diag; - } - my $file = $meta->{file} || $table; - my $tbl = $self->SUPER::open_table ($data, $file, $createMode, $lockMode); - if ($tbl && $tbl->{fh}) { - $tbl->{csv_csv_in} = $meta->{csv_in}; - $tbl->{csv_csv_out} = $meta->{csv_out}; - if (my $types = $meta->{types}) { - # The 'types' array contains DBI types, but we need types - # suitable for Text::CSV_XS. - my $t = []; - for (@{$types}) { - $_ = $_ - ? $DBD::CSV::dr::CSV_TYPES[$_ + 6] || Text::CSV_XS::PV () - : Text::CSV_XS::PV (); - push @$t, $_; - } - $tbl->{types} = $t; - } - if ( !$createMode and - !$self->{ignore_missing_table} and $self->{command} ne "DROP") { - my $array; - my $skipRows = exists $meta->{skip_rows} - ? $meta->{skip_rows} - : exists $meta->{col_names} ? 0 : 1; - if ($skipRows--) { - $array = $tbl->fetch_row ($data) or croak "Missing first row"; - unless ($self->{raw_header}) { - s/\W/_/g for @$array; - } - $tbl->{col_names} = $array; - while ($skipRows--) { - $tbl->fetch_row ($data); - } - } - $tbl->{first_row_pos} = $tbl->{fh}->tell (); - exists $meta->{col_names} and - $array = $tbl->{col_names} = $meta->{col_names}; - if (!$tbl->{col_names} || !@{$tbl->{col_names}}) { - # No column names given; fetch first row and create default - # names. - my $ar = $tbl->{cached_row} = $tbl->fetch_row ($data); - $array = $tbl->{col_names}; - push @$array, map { "col$_" } 0 .. $#$ar; - } - my $i = 0; - $tbl->{col_nums}{$_} = $i++ for @$array; - } - } - $tbl; - }; # open_table +our @ISA = qw(DBD::File::Statement); package DBD::CSV::Table; use strict; -use DBD::File; use Carp; -@DBD::CSV::Table::ISA = qw(DBD::File::Table); +our @ISA = qw(DBD::File::Table); sub bootstrap_table_meta { @@ -383,12 +229,11 @@ $class->SUPER::table_meta_attr_changed ($meta, $attr, $value); } # table_meta_attr_changed -$DBD::File::VERSION > 0.38 and *open_file = sub { +sub open_data { my ($self, $meta, $attrs, $flags) = @_; $self->SUPER::open_file ($meta, $attrs, $flags); - my $tbl = $meta; - if ($tbl && $tbl->{fh}) { + if ($meta && $meta->{fh}) { $attrs->{csv_csv_in} = $meta->{csv_in}; $attrs->{csv_csv_out} = $meta->{csv_out}; if (my $types = $meta->{types}) { @@ -402,7 +247,7 @@ : Text::CSV_XS::PV (); push @$t, $_; } - $tbl->{types} = $t; + $meta->{types} = $t; } if (!$flags->{createMode}) { my $array; @@ -414,32 +259,48 @@ defined $meta->{skip_rows} or $meta->{skip_rows} = $skipRows; if ($skipRows--) { - $array = $attrs->{csv_csv_in}->getline ($tbl->{fh}) or + $array = $attrs->{csv_csv_in}->getline ($meta->{fh}) or croak "Missing first row due to ".$attrs->{csv_csv_in}->error_diag; unless ($meta->{raw_header}) { s/\W/_/g for @$array; } - $tbl->{col_names} = $array; + defined $meta->{col_names} or + $meta->{col_names} = $array; while ($skipRows--) { - $tbl->{csv_csv_in}->getline ($tbl->{fh}); + $attrs->{csv_csv_in}->getline ($meta->{fh}); } } - $tbl->{first_row_pos} = $tbl->{fh}->tell (); + # lockMode is set 1 for DELETE, INSERT or UPDATE + # no other case need seeking + $flags->{lockMode} and # $meta->{fh}->can ("tell") and + $meta->{first_row_pos} = $meta->{fh}->tell (); exists $meta->{col_names} and - $array = $tbl->{col_names} = $meta->{col_names}; - if (!$tbl->{col_names} || !@{$tbl->{col_names}}) { + $array = $meta->{col_names}; + if (!$meta->{col_names} || !@{$meta->{col_names}}) { # No column names given; fetch first row and create default # names. 
- my $ar = $tbl->{cached_row} = - $tbl->{csv_csv_in}->getline ($tbl->{fh}); - $array = $tbl->{col_names}; + my $ar = $meta->{cached_row} = + $attrs->{csv_csv_in}->getline ($meta->{fh}); + $array = $meta->{col_names}; push @$array, map { "col$_" } 0 .. $#$ar; } - my $i = 0; - $tbl->{col_nums}{$_} = $i++ for @$array; # XXX not necessary for DBI > 1.611 } } - }; # open_file + } # open_file + +no warnings 'once'; +$DBI::VERSION < 1.623 and + *open_file = \&open_data; +use warnings; + +sub _csv_diag +{ + my @diag = $_[0]->error_diag; + for (2, 3) { + defined $diag[$_] or $diag[$_] = "?"; + } + return @diag; + } # _csv_diag sub fetch_row { @@ -448,7 +309,7 @@ exists $self->{cached_row} and return $self->{row} = delete $self->{cached_row}; - my $tbl = $DBD::File::VERSION <= 0.38 ? $self : $self->{meta}; + my $tbl = $self->{meta}; my $csv = $self->{csv_csv_in} or return do { $data->set_err ($DBI::stderr, "Fetch from undefined handle"); undef }; @@ -458,9 +319,9 @@ unless ($fields) { $csv->eof and return; - my @diag = $csv->error_diag; - my $file = $DBD::File::VERSION <= 0.38 ? $self->{file} : $tbl->{f_fqfn}; - croak "Error $diag[0] while reading file $file: $diag[1]"; + my @diag = _csv_diag ($csv); + my $file = $tbl->{f_fqfn}; + croak "Error $diag[0] while reading file $file: $diag[1] \@ line $diag[3] pos $diag[2]"; } @$fields < @{$tbl->{col_names}} and push @$fields, (undef) x (@{$tbl->{col_names}} - @$fields); @@ -470,18 +331,21 @@ sub push_row { my ($self, $data, $fields) = @_; - my $tbl = $DBD::File::VERSION <= 0.38 ? $self : $self->{meta}; + my $tbl = $self->{meta}; my $csv = $self->{csv_csv_out}; my $fh = $tbl->{fh}; unless ($csv->print ($fh, $fields)) { - my @diag = $csv->error_diag; - my $file = $DBD::File::VERSION <= 0.38 ? $self->{file} : $tbl->{f_fqfn}; - croak "Error $diag[0] while writing file $file: $diag[1]"; + my @diag = _csv_diag ($csv); + my $file = $tbl->{f_fqfn}; + return do { $data->set_err ($DBI::stderr, "Error $diag[0] while writing file $file: $diag[1] \@ line $diag[3] pos $diag[2]"); undef }; } 1; } # push_row + +no warnings 'once'; *push_names = \&push_row; +use warnings; 1; @@ -545,17 +409,20 @@ =over 4 =item DBI +X<DBI> The DBI (Database independent interface for Perl), version 1.00 or a later release =item DBD::File +X<DBD::File> This is the base class for DBD::CSV, and it is part of the DBI distribution. As DBD::CSV requires version 0.38 or newer for DBD::File it effectively requires DBI version 1.611 or newer. =item SQL::Statement +X<SQL::Statement> A simple SQL engine. This module defines all of the SQL syntax for DBD::CSV, new SQL support is added with each release so you should @@ -567,8 +434,9 @@ that the test suite does not test in this mode! =item Text::CSV_XS +X<Text::CSV_XS> -This module is used for writing rows to or reading rows from CSV files. +This module is used to read and write rows in a CSV file. =back @@ -609,12 +477,14 @@ =head2 Supported SQL Syntax -All SQL processing for DBD::CSV is done by the L<SQL::Statement> module. +All SQL processing for DBD::CSV is done by SQL::Statement. See +L<SQL::Statement> for more specific information about its feature set. Features include joins, aliases, built-in and user-defined functions, and more. See L<SQL::Statement::Syntax> for a description of the SQL syntax supported in DBD::CSV. -Table names are case insensitive unless quoted. +Table- and column-names are case insensitive unless quoted. 
Column names +will be sanitized unless L</raw_header> is true; =head1 Using DBD::CSV with DBI @@ -680,7 +550,7 @@ Using attributes in the DSN is easier to use when the DSN is derived from an outside source (environment variable, database entry, or configure file), -whereas using all entries in the attribute hash is easier to read and to +whereas specifying entries in the attribute hash is easier to read and to maintain. =head2 Creating and dropping tables @@ -708,7 +578,7 @@ =head2 Inserting, fetching and modifying data The following examples insert some data in a table and fetch it back: -First all data in the string: +First, an example where the column data is concatenated in the SQL string: $dbh->do ("INSERT INTO $table VALUES (1, ". $dbh->quote ("foobar") . ")"); @@ -716,18 +586,18 @@ Note the use of the quote method for escaping the word "foobar". Any string must be escaped, even if it does not contain binary data. -Next an example using parameters: +Next, an example using parameters: $dbh->do ("INSERT INTO $table VALUES (?, ?)", undef, 2, "It's a string!"); -Note that you don't need to use the quote method here, this is done -automatically for you. This version is particularly well designed for +Note that you don't need to quote column data passed as parameters. +This version is particularly well designed for loops. Whenever performance is an issue, I recommend using this method. You might wonder about the C<undef>. Don't wonder, just take it as it -is. :-) It's an attribute argument that I have never ever used and -will be parsed to the prepare method as a second argument. +is. :-) It's an attribute argument that I have never used and will be +passed to the prepare method as the second argument. To retrieve data, you can use the following: @@ -779,8 +649,9 @@ =head2 Error handling -In the above examples we have never cared about return codes. Of course, -this cannot be recommended. Instead we should have written (for example): +In the above examples we have never cared about return codes. Of +course, this is not recommended. Instead we should have written (for +example): my $sth = $dbh->prepare ("SELECT * FROM $table WHERE id = ?") or die "prepare: " . $dbh->errstr (); @@ -836,30 +707,38 @@ =over 4 =item AutoCommit +X<AutoCommit> Always on =item ChopBlanks +X<ChopBlanks> Works =item NUM_OF_FIELDS +X<NUM_OF_FIELDS> Valid after C<$sth-E<gt>execute> =item NUM_OF_PARAMS +X<NUM_OF_PARAMS> Valid after C<$sth-E<gt>prepare> =item NAME +X<NAME> =item NAME_lc +X<NAME_lc> =item NAME_uc +X<NAME_uc> Valid after C<$sth-E<gt>execute>; undef for Non-Select statements. =item NULLABLE +X<NULLABLE> Not really working. Always returns an array ref of one's, as DBD::CSV does not verify input data. Valid after C<$sth-E<gt>execute>; undef for @@ -879,19 +758,24 @@ In addition to the DBI attributes, you can use the following dbh attributes: +=head2 DBD::File attributes + =over 4 =item f_dir +X<f_dir> This attribute is used for setting the directory where CSV files are -opened. Usually you set it in the dbh, it defaults to the current -directory ("."). However, it is overwritable in the statement handles. +opened. Usually you set it in the dbh and it defaults to the current +directory ("."). However, it may be overridden in statement handles. =item f_ext +X<f_ext> This attribute is used for setting the file extension. =item f_schema +X<f_schema> This attribute allows you to set the database schema name. The default is to use the owner of C<f_dir>. C<undef> is allowed, but not in the DSN part. 
@@ -903,52 +787,69 @@ }) or die $DBI::errstr; =item f_encoding +X<f_encoding> -This attribute allows you to set the encoding of the data. With CSV, it is -not possible to set (and remember) the encoding on a per-field basis, but -DBD::File now allows to set the encoding of the underlying file. If this -attribute is not set, or undef is passed, the file will be seen as binary. +This attribute allows you to set the encoding of the data. With CSV, it is not +possible to set (and remember) the encoding on a column basis, but DBD::File +now allows the encoding to be set on the underlying file. If this attribute is +not set, or undef is passed, the file will be seen as binary. =item f_lock +X<f_lock> -With this attribute, you can force locking mode (if locking is supported -at all) for opening tables. By default, tables are opened with a shared -lock for reading, and with an exclusive lock for writing. The supported -modes are: +With this attribute you can specify a locking mode to be used (if locking is +supported at all) for opening tables. By default, tables are opened with a +shared lock for reading, and with an exclusive lock for writing. The +supported modes are: =over 2 =item 0 +X<0> Force no locking at all. =item 1 +X<1> Only shared locks will be used. =item 2 +X<2> Only exclusive locks will be used. =back +=back + But see L<DBD::File/"KNOWN BUGS">. +=head2 Text::CSV_XS specific attributes + +=over 4 + =item csv_eol +X<csv_eol> =item csv_sep_char +X<csv_sep_char> =item csv_quote_char +X<csv_quote_char> =item csv_escape_char +X<csv_escape_char> =item csv_class +X<csv_class> =item csv_csv +X<csv_csv> The attributes I<csv_eol>, I<csv_sep_char>, I<csv_quote_char> and I<csv_escape_char> are corresponding to the respective attributes of the -Text::CSV_XS object. You want to set these attributes if you have unusual +Text::CSV_XS object. You may want to set these attributes if you have unusual CSV files like F</etc/passwd> or MS Excel generated CSV files with a semicolon as separator. Defaults are "\015\012", ';', '"' and '"', respectively. @@ -988,6 +889,7 @@ the I<csv_tables> attribute. =item csv_null +X<csv_null> With this option set, all new statement handles will set C<always_quote> and C<blank_is_undef> in the CSV parser and writer, so it knows how to @@ -999,16 +901,18 @@ $dbh->{csv_null} = 1; =item csv_tables +X<csv_tables> This hash ref is used for storing table dependent metadata. For any table it contains an element with the table name as key and another hash ref with the following attributes: =item csv_* +X<csv_*> All other attributes that start with C<csv_> and are not described above -will be passed to C<Text::CSV_XS> (without the C<csv_> prefix). these -extra options are most likely to be only useful for reading (select) +will be passed to C<Text::CSV_XS> (without the C<csv_> prefix). These +extra options are only likely to be useful for reading (select) handles. Examples: $dbh->{csv_allow_whitespace} = 1; @@ -1017,33 +921,46 @@ See the C<Text::CSV_XS> documentation for the full list and the documentation. +=back + +=head2 Driver specific attributes + =over 4 =item file +X<file> The tables file name; defaults to "$dbh->{f_dir}/$table" =item eol +X<eol> =item sep_char +X<sep_char> =item quote_char +X<quote_char> =item escape_char +X<escape_char> =item class +X<class> =item csv +X<csv> These correspond to the attributes I<csv_eol>, I<csv_sep_char>, I<csv_quote_char>, I<csv_escape_char>, I<csv_class> and I<csv_csv>. -The difference is that they work on a per-table base. 
+The difference is that they work on a per-table basis. =item col_names +X<col_names> =item skip_first_row +X<skip_first_row> By default DBD::CSV assumes that column names are stored in the first row of the CSV file and sanitizes them (see C<raw_header> below). If this is @@ -1056,23 +973,24 @@ C<col0>, C<col1>, ... =item raw_header +X<raw_header> Due to the SQL standard, field names cannot contain special characters -like a dot (C<.>). Following the approach of mdb_tools, all these tokens -are translated to an underscore (C<_>) when reading the first line of the -CSV file, so all field names are `sanitized'. If you do not want this to -happen, set C<raw_header> to a true value. DBD::CSV cannot guarantee that -any part in the toolchain will work if field names have those characters, +like a dot (C<.>) or a space (C< >) unless the column names are quoted. +Following the approach of mdb_tools, all these tokens are translated to an +underscore (C<_>) when reading the first line of the CSV file, so all field +names are 'sanitized'. If you do not want this to happen, set C<raw_header> +to a true value and the entries in the first line of the CSV data will be +used verbatim for column headers and field names. DBD::CSV cannot guarantee +that any part in the toolchain will work if field names have those characters, and the chances are high that the SQL statements will fail. =back -=back - It's strongly recommended to check the attributes supported by L<DBD::File/Metadata>. -Example: Suggest you want to use F</etc/passwd> as a CSV file. :-) +Example: Suppose you want to use /etc/passwd as a CSV file. :-) There simplest way is: use DBI; @@ -1089,7 +1007,7 @@ $sth = $dbh->prepare ("SELECT * FROM passwd"); Another possibility where you leave all the defaults as they are and -overwrite them on a per table base: +override them on a per table basis: require DBI; my $dbh = DBI->connect ("dbi:CSV:"); @@ -1111,6 +1029,7 @@ =over 4 =item data_sources +X<data_sources> The C<data_sources> method returns a list of sub-directories of the current directory in the form "dbi:CSV:directory=$dirname". @@ -1121,6 +1040,7 @@ my @list = $drh->data_sources (f_dir => "/usr/local/csv_data"); =item list_tables +X<list_tables> This method returns a list of file names inside $dbh->{directory}. Example: @@ -1141,9 +1061,9 @@ =item * The module is using flock () internally. However, this function is not -available on platforms. Using flock () is disabled on MacOS and Windows -95: There's no locking at all (perhaps not so important on these -operating systems, as they are for single users anyways). +available on some platforms. Use of flock () is disabled on MacOS and +Windows 95: There's no locking at all (perhaps not so important on +these operating systems, as they are for single users anyways). =back @@ -1152,6 +1072,7 @@ =over 4 =item Tests +X<Tests> Aim for a full 100% code coverage @@ -1168,28 +1089,34 @@ that is useful. =item RT +X<RT> Attack all open DBD::CSV bugs in RT =item CPAN::Forum +X<CPAN::Forum> Attack all items in http://www.cpanforum.com/dist/DBD-CSV =item Documentation +X<Documentation> Expand on error-handling, and document all possible errors. Use Text::CSV_XS::error_diag () wherever possible. =item Debugging +X<Debugging> Implement and document dbd_verbose. =item Data dictionary +X<Data dictionary> Investigate the possibility to store the data dictionary in a file like .sys$columns that can store the field attributes (type, key, nullable). 
=item Examples +X<Examples> Make more real-life examples from the docs in examples/ @@ -1222,7 +1149,7 @@ =head1 COPYRIGHT AND LICENSE -Copyright (C) 2009-2012 by H.Merijn Brand +Copyright (C) 2009-2013 by H.Merijn Brand Copyright (C) 2004-2009 by Jeff Zucker Copyright (C) 1998-2004 by Jochen Wiedmann diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/t/50_chopblanks.t new/DBD-CSV-0.38/t/50_chopblanks.t --- old/DBD-CSV-0.34/t/50_chopblanks.t 2010-08-06 13:42:23.000000000 +0200 +++ new/DBD-CSV-0.38/t/50_chopblanks.t 2012-11-13 08:48:57.000000000 +0100 @@ -38,8 +38,8 @@ $sth->{ChopBlanks} = 0; ok (1, "ChopBlanks 0"); ok ($sth->execute ($row->[0]), "execute"); - ok (my $r = $sth->fetch, "fetch"); - is_deeply ($r, $row, "content"); + ok (my $r = $sth->fetch, "fetch ($row->[0]:1)"); + is_deeply ($r, $row, "content ($row->[0]:1)"); $sth->{ChopBlanks} = 1; ok (1, "ChopBlanks 1"); @@ -48,8 +48,8 @@ if ($DBD::File::VERSION <= 0.38) { s/\s+$// for @$row; # Bug fixed in new DBI } - ok ($r = $sth->fetch, "fetch"); - is_deeply ($r, $row, "content"); + ok ($r = $sth->fetch, "fetch ($row->[0]:2)"); + is_deeply ($r, $row, "content ($row->[0]:2)"); } ok ($sti->finish, "finish sti"); diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/t/80_rt.t new/DBD-CSV-0.38/t/80_rt.t --- old/DBD-CSV-0.34/t/80_rt.t 2010-09-16 08:33:30.000000000 +0200 +++ new/DBD-CSV-0.38/t/80_rt.t 2012-10-11 09:18:31.000000000 +0200 @@ -261,6 +261,35 @@ ok ($dbh->do ("drop table RT$rt"), "drop"); ok ($dbh->disconnect, "disconnect"); } + +{ $rt = 80078; + ok ($rt, "RT-$rt - $desc{$rt}"); + my @lines = @{$input{$rt}}; + + my $tbl = "rt$rt"; + open my $fh, ">", "output/$tbl"; + print $fh @lines; + close $fh; + + ok (my $dbh = Connect ({ + csv_sep_char => "\t", + csv_quote_char => undef, + csv_escape_char => "\\", + csv_allow_loose_escapes => 1, + RaiseError => 1, + PrintError => 1, + }), "connect"); + $dbh->{csv_tables}{$tbl}{col_names} = []; + ok (my $sth = $dbh->prepare ("select * from $tbl"), "prepare"); + eval { + ok ($sth->execute, "execute"); + ok (!$@, "no error"); + }; + + ok ($dbh->do ("drop table $tbl"), "drop"); + ok ($dbh->disconnect, "disconnect"); + } + done_testing (); __END__ @@ -299,3 +328,6 @@ "HEADER1";"HEADER2" Volki;Bolki Zolki;Solki +�80078� - bug in DBD::CSV causes select to fail +a b c d +e f g h diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/DBD-CSV-0.34/t/85_error.t new/DBD-CSV-0.38/t/85_error.t --- old/DBD-CSV-0.34/t/85_error.t 1970-01-01 01:00:00.000000000 +0100 +++ new/DBD-CSV-0.38/t/85_error.t 2012-08-24 20:43:15.000000000 +0200 @@ -0,0 +1,44 @@ +#!/usr/bin/perl + +use strict; +use warnings; +use Test::More; + +BEGIN { use_ok ("DBI") } +do "t/lib.pl"; + +my @tbl_def = ( + [ "id", "INTEGER", 4, 0 ], + [ "name", "CHAR", 64, 0 ], + ); + +unlink glob "output/*"; + +ok (my $dbh = Connect (), "connect"); + +ok (my $tbl = FindNewTable ($dbh), "find new test table"); + +like (my $def = TableDefinition ($tbl, @tbl_def), + qr{^create table $tbl}i, "table definition"); +ok ($dbh->do ($def), "create table"); +my $tbl_file = DbFile ($tbl); +ok (-s $tbl_file, "file exists"); +ok ($dbh->disconnect, "disconnect"); + +ok (-f $tbl_file, "file still there"); +open my $fh, ">>", $tbl_file; +print $fh qq{1, "p0wnd",",""",0\n}; # Very bad content +close $fh; + +ok ($dbh = Connect (), "connect"); +{ local $dbh->{PrintError} = 0; + local $dbh->{RaiseError} = 0; + ok (my $sth = 
$dbh->prepare ("select * from $tbl"), "prepare"); + is ($sth->execute, undef, "execute should fail"); + # It is safe to regex on this text, as it is NOT local dependant + like ($dbh->errstr, qr{\w+ \@ line [0-9?]+ pos [0-9?]+}, "error message"); + }; +ok ($dbh->do ("drop table $tbl"), "drop"); +ok ($dbh->disconnect, "disconnect"); + +done_testing (); -- To unsubscribe, e-mail: opensuse-commit+unsubscribe@opensuse.org For additional commands, e-mail: opensuse-commit+help@opensuse.org