#!/usr/bin/env perl
# cloc -- Count Lines of Code                                      {{{1
# Copyright (C) 2006-2024 Al Danial
# First release August 2006
#
# Includes code from:
#   - SLOCCount v2.26
#     http://www.dwheeler.com/sloccount/
#     by David Wheeler.
#   - Regexp::Common v2017060201
#     https://metacpan.org/pod/Regexp::Common
#     by Damian Conway and Abigail.
#   - Win32::Autoglob 1.01
#     https://metacpan.org/pod/Win32::Autoglob
#     by Sean M. Burke.
#   - Algorithm::Diff 1.1902
#     https://metacpan.org/pod/Algorithm::Diff
#     by Tye McQueen.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details:
# .
#
# 1}}}
my $VERSION = "2.00";  # odd number == beta; even number == stable
my $URL     = "github.com/AlDanial/cloc";  # 'https://' pushes header too wide
require 5.10.0;
# use modules                                                      {{{1
use warnings;
use strict;

use Getopt::Long;
use File::Basename;
use File::Temp qw { tempfile tempdir };
use File::Find;
use File::Path;
use File::Spec;
use IO::File;
use List::Util qw( min max );
use Cwd;
use POSIX qw { strftime ceil };

# Parallel::ForkManager isn't in the standard distribution.
# Use it only if installed, and only if --processes=N is given.
# The module load happens in get_max_processes().
my $HAVE_Parallel_ForkManager = 0;

# Digest::MD5 isn't in the standard distribution.  Use it only if installed.
my $HAVE_Digest_MD5 = 0;
eval "use Digest::MD5;";
if (defined $Digest::MD5::VERSION) {
    $HAVE_Digest_MD5 = 1;
} else {
    warn "Digest::MD5 not installed; will skip file uniqueness checks.\n";
}

# Time::HiRes became standard with Perl 5.8
my $HAVE_Time_HiRes = 0;
eval "use Time::HiRes;";
$HAVE_Time_HiRes = 1 if defined $Time::HiRes::VERSION;

my $HAVE_Rexexp_Common;
# Regexp::Common isn't in the standard distribution.  It will
# be installed in a temp directory if necessary.
eval "use Regexp::Common qw ( comment ) ";
if (defined $Regexp::Common::VERSION) {
    $HAVE_Rexexp_Common = 1;
} else {
    $HAVE_Rexexp_Common = 0;
}

my $HAVE_Algorithm_Diff = 0;
# Algorithm::Diff isn't in the standard distribution.  It will
# be installed in a temp directory if necessary.
eval "use Algorithm::Diff qw ( sdiff ) ";
if (defined $Algorithm::Diff::VERSION) {
    $HAVE_Algorithm_Diff = 1;
} else {
    Install_Algorithm_Diff();
}

# print "2 HAVE_Algorithm_Diff = $HAVE_Algorithm_Diff\n";
# test_alg_diff($ARGV[$#ARGV - 1], $ARGV[$#ARGV]); die;
# die "Hre=$HAVE_Rexexp_Common Had=$HAVE_Algorithm_Diff";
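# Quick sanity check (illustrative only; not used by the code): each of the
# optional modules probed above can be tested from a shell with a one-liner
# such as
#     perl -MDigest::MD5     -e 'print "$Digest::MD5::VERSION\n"'
#     perl -MRegexp::Common  -e 'print "$Regexp::Common::VERSION\n"'
#     perl -MAlgorithm::Diff -e 'print "$Algorithm::Diff::VERSION\n"'
# Each command prints the installed version, or fails to compile when the
# module is absent, which is the same signal the eval/VERSION checks above
# rely on.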
# Uncomment next two lines when building Windows executable with perl2exe
# or if running on a system that already has Regexp::Common.
#use Regexp::Common;
#$HAVE_Rexexp_Common = 1;

#perl2exe_include "Regexp/Common/whitespace.pm"
#perl2exe_include "Regexp/Common/URI.pm"
#perl2exe_include "Regexp/Common/URI/fax.pm"
#perl2exe_include "Regexp/Common/URI/file.pm"
#perl2exe_include "Regexp/Common/URI/ftp.pm"
#perl2exe_include "Regexp/Common/URI/gopher.pm"
#perl2exe_include "Regexp/Common/URI/http.pm"
#perl2exe_include "Regexp/Common/URI/pop.pm"
#perl2exe_include "Regexp/Common/URI/prospero.pm"
#perl2exe_include "Regexp/Common/URI/news.pm"
#perl2exe_include "Regexp/Common/URI/tel.pm"
#perl2exe_include "Regexp/Common/URI/telnet.pm"
#perl2exe_include "Regexp/Common/URI/tv.pm"
#perl2exe_include "Regexp/Common/URI/wais.pm"
#perl2exe_include "Regexp/Common/CC.pm"
#perl2exe_include "Regexp/Common/SEN.pm"
#perl2exe_include "Regexp/Common/number.pm"
#perl2exe_include "Regexp/Common/delimited.pm"
#perl2exe_include "Regexp/Common/profanity.pm"
#perl2exe_include "Regexp/Common/net.pm"
#perl2exe_include "Regexp/Common/zip.pm"
#perl2exe_include "Regexp/Common/comment.pm"
#perl2exe_include "Regexp/Common/balanced.pm"
#perl2exe_include "Regexp/Common/lingua.pm"
#perl2exe_include "Regexp/Common/list.pm"
#perl2exe_include "File/Glob.pm"

use Text::Tabs qw { expand };
use Cwd qw { cwd };
use File::Glob;
# 1}}}
# Usage information, options processing.                           {{{1
my $ON_WINDOWS = 0;
   $ON_WINDOWS = 1 if ($^O =~ /^MSWin/) or ($^O eq "Windows_NT");
if ($ON_WINDOWS and $ENV{'SHELL'}) {
    if ($ENV{'SHELL'} =~ m{^/}) {
        $ON_WINDOWS = 0;  # make Cygwin look like Unix
    } else {
        $ON_WINDOWS = 1;  # MKS defines $SHELL but still acts like Windows
    }
}

my $HAVE_Win32_Long_Path = 0;
# Win32::LongPath is an optional dependency that when available on
# Windows will be used to support reading files past the 255 char
# path length limit.
if ($ON_WINDOWS) {
    eval "use Win32::LongPath;";
    if (defined $Win32::LongPath::VERSION) {
        $HAVE_Win32_Long_Path = 1;
    }
}

my $config_file = '';
if ( $ENV{'HOME'} ) {
    $config_file = File::Spec->catfile( $ENV{'HOME'},
                                        '.config', 'cloc', 'options.txt');
} elsif ( $ENV{'APPDATA'} and $ON_WINDOWS ) {
    $config_file = File::Spec->catfile( $ENV{'APPDATA'}, 'cloc');
}
# $config_file may be updated by check_alternate_config_files()

my $NN     = chr(27) . "[0m";   # normal
   $NN     = "" if $ON_WINDOWS or !(-t STDOUT); # -t STDOUT: is it a terminal?
my $BB     = chr(27) . "[1m";   # bold
   $BB     = "" if $ON_WINDOWS or !(-t STDOUT);
my $script = basename $0;

# Intended for v1.88:
#   --git-diff-simindex       Git diff strategy #3:  use git's similarity index
#                             (git diff -M --name-status) to identify file pairs
#                             to compare.  This is especially useful to compare
#                             files that were renamed between the commits.

my $brief_usage  = "
                       cloc -- Count Lines of Code

Usage:
    $script [options]
        Count physical lines of source code and comments in the given
        files (may be archives such as compressed tarballs or zip files)
        and/or recursively below the given directories or git commit
        hashes.
        Example:    cloc src/ include/ main.c

    $script [options] --diff
        Compute differences of physical lines of source code and comments
        between any pairwise combination of directory names, archive
        files or git commit hashes.
        Example:    cloc --diff Python-3.5.tar.xz python-3.6/

$script --help shows full documentation on the options.

https://$URL has numerous examples and more information.
"; my $usage = " Usage: $script [options] | | Count, or compute differences of, physical lines of source code in the given files (may be archives such as compressed tarballs or zip files, or git commit hashes or branch names) and/or recursively below the given directories. ${BB}Input Options${NN} --extract-with= This option is only needed if cloc is unable to figure out how to extract the contents of the input file(s) by itself. Use to extract binary archive files (e.g.: .tar.gz, .zip, .Z). Use the literal '>FILE<' as a stand-in for the actual file(s) to be extracted. For example, to count lines of code in the input files gcc-4.2.tar.gz perl-5.8.8.tar.gz on Unix use --extract-with='gzip -dc >FILE< | tar xf -' or, if you have GNU tar, --extract-with='tar zxf >FILE<' and on Windows use, for example: --extract-with=\"\\\"c:\\Program Files\\WinZip\\WinZip32.exe\\\" -e -o >FILE< .\" (if WinZip is installed there). --list-file= Take the list of file and/or directory names to process from , which has one file/directory name per line. Only exact matches are counted; relative path names will be resolved starting from the directory where cloc is invoked. Set to - to read file names from a STDIN pipe. See also --exclude-list-file, --config. --diff-list-file= Take the pairs of file names to be diff'ed from , whose format matches the output of --diff-alignment. (Run with that option to see a sample.) The language identifier at the end of each line is ignored. This enables --diff mode and bypasses file pair alignment logic. Use --diff-list-files to define the file name pairs in separate files. See also --config. --diff-list-files Compute differences in code and comments between the files and directories listed in and . Each input file should use the same format as --list-file, where there is one file or directory name per line. Only exact matches are counted; relative path names will be resolved starting from the directory where cloc is invoked. This enables --diff mode. See also --list-file, --diff-list-file, --diff. --vcs= Invoke a system call to to obtain a list of files to work on. If is 'git', then will invoke 'git ls-files' to get a file list and 'git submodule status' to get a list of submodules whose contents will be ignored. See also --git which accepts git commit hashes and branch names. If is 'svn' then will invoke 'svn list -R'. The primary benefit is that cloc will then skip files explicitly excluded by the versioning tool in question, ie, those in .gitignore or have the svn:ignore property. Alternatively may be any system command that generates a list of files. Note: cloc must be in a directory which can read the files as they are returned by . cloc will not download files from remote repositories. 'svn list -R' may refer to a remote repository to obtain file names (and therefore may require authentication to the remote repository), but the files themselves must be local. Setting to 'auto' selects between 'git' and 'svn' (or neither) depending on the presence of a .git or .svn subdirectory below the directory where cloc is invoked. --unicode Check binary files to see if they contain Unicode expanded ASCII text. This causes performance to drop noticeably. ${BB}Processing Options${NN} --autoconf Count .in files (as processed by GNU autoconf) of recognized languages. See also --no-autogen. --by-file Report results for every source file encountered. See also --fmt under 'Output Options'. --by-file-by-lang Report results for every source file encountered in addition to reporting by language. 
--config Read command line switches from instead of the default location of $config_file. The file should contain one switch, along with arguments (if any), per line. Blank lines and lines beginning with '#' are skipped. Options given on the command line take priority over entries read from the file. If a directory is also given with any of these switches: --list-file, --exclude-list-file, --read-lang-def, --force-lang-def, --diff-list-file and a config file exists in that directory, it will take priority over $config_file. --count-and-diff First perform direct code counts of source file(s) of and separately, then perform a diff of these. Inputs may be pairs of files, directories, or archives. If --out or --report-file is given, three output files will be created, one for each of the two counts and one for the diff. See also --diff, --diff-alignment, --diff-timeout, --ignore-case, --ignore-whitespace. --diff Compute differences in code and comments between source file(s) of and . The inputs may be any mix of files, directories, archives, or git commit hashes. Use --diff-alignment to generate a list showing which file pairs where compared. When comparing git branches, only files which have changed in either commit are compared. See also --git, --count-and-diff, --diff-alignment, --diff-list-file, --diff-timeout, --ignore-case, --ignore-whitespace. --diff-timeout Ignore files which take more than seconds to process. Default is 10 seconds. Setting to 0 allows unlimited time. (Large files with many repeated lines can cause Algorithm::Diff::sdiff() to take hours.) See also --timeout. --docstring-as-code cloc considers docstrings to be comments, but this is not always correct as docstrings represent regular strings when they appear on the right hand side of an assignment or as function arguments. This switch forces docstrings to be counted as code. --follow-links [Unix only] Follow symbolic links to directories (sym links to files are always followed). See also --stat. --force-lang=[,] Process all files that have a extension with the counter for language . For example, to count all .f files with the Fortran 90 counter (which expects files to end with .f90) instead of the default Fortran 77 counter, use --force-lang=\"Fortran 90\",f If is omitted, every file will be counted with the counter. This option can be specified multiple times (but that is only useful when is given each time). See also --script-lang, --lang-no-ext. --force-lang-def= Load language processing filters from , then use these filters instead of the built-in filters. Note: languages which map to the same file extension (for example: MATLAB/Mathematica/Objective-C/MUMPS/Mercury; Pascal/PHP; Lisp/OpenCL; Lisp/Julia; Perl/Prolog) will be ignored as these require additional processing that is not expressed in language definition files. Use --read-lang-def to define new language filters without replacing built-in filters (see also --write-lang-def, --write-lang-def-incl-dup, --config). --git Forces the inputs to be interpreted as git targets (commit hashes, branch names, et cetera) if these are not first identified as file or directory names. This option overrides the --vcs=git logic if this is given; in other words, --git gets its list of files to work on directly from git using the hash or branch name rather than from 'git ls-files'. This option can be used with --diff to perform line count diffs between git commits, or between a git commit and a file, directory, or archive. 
Use -v/--verbose to see the git system commands cloc issues. --git-diff-rel Same as --git --diff, or just --diff if the inputs are recognized as git targets. Only files which have changed in either commit are compared. --git-diff-all Git diff strategy #2: compare all files in the repository between the two commits. --ignore-whitespace Ignore horizontal white space when comparing files with --diff. See also --ignore-case. --ignore-case Ignore changes in case within file contents; consider upper- and lowercase letters equivalent when comparing files with --diff. See also --ignore-whitespace. --ignore-case-ext Ignore case of file name extensions. This will cause problems counting some languages (specifically, .c and .C are associated with C and C++; this switch would count .C files as C rather than C++ on *nix operating systems). File name case insensitivity is always true on Windows. --lang-no-ext= Count files without extensions using the counter. This option overrides internal logic for files without extensions (where such files are checked against known scripting languages by examining the first line for #!). See also --force-lang, --script-lang. --max-file-size= Skip files larger than megabytes when traversing directories. By default, =100. cloc's memory requirement is roughly twenty times larger than the largest file so running with files larger than 100 MB on a computer with less than 2 GB of memory will cause problems. Note: this check does not apply to files explicitly passed as command line arguments. --no-autogen[=list] Ignore files generated by code-production systems such as GNU autoconf. To see a list of these files (then exit), run with --no-autogen list See also --autoconf. --no-recurse Count files in the given directories without recursively descending below them. --original-dir [Only effective in combination with --strip-comments or --strip-code] Write the stripped files to the same directory as the original files. --only-count-files Only count files by language. Blank, comment, and code counts will be zero. --read-binary-files Process binary files in addition to text files. This is usually a bad idea and should only be attempted with text files that have embedded binary data. --read-lang-def= Load new language processing filters from and merge them with those already known to cloc. If defines a language cloc already knows about, cloc's definition will take precedence. Use --force-lang-def to over-ride cloc's definitions (see also --write-lang-def, --write-lang-def-incl-dup, --config). --script-lang=, Process all files that invoke as a #! scripting language with the counter for language . For example, files that begin with #!/usr/local/bin/perl5.8.8 will be counted with the Perl counter by using --script-lang=Perl,perl5.8.8 The language name is case insensitive but the name of the script language executable, , must have the right case. This option can be specified multiple times. See also --force-lang, --lang-no-ext. --sdir= Use as the scratch directory instead of letting File::Temp chose the location. Files written to this location are not removed at the end of the run (as they are with File::Temp). --skip-leading= Skip the first lines of each file. If a comma separated list of extensions is also given, only skip lines from those file types. Example: --skip-leading=10,cpp,h will skip the first ten lines of *.cpp and *.h files. This is useful for ignoring boilerplate text. --skip-uniqueness Skip the file uniqueness check. 
This will give a performance boost at the expense of counting files with identical contents multiple times (if such duplicates exist). --stat Some file systems (AFS, CD-ROM, FAT, HPFS, SMB) do not have directory 'nlink' counts that match the number of its subdirectories. Consequently cloc may undercount or completely skip the contents of such file systems. This switch forces File::Find to stat directories to obtain the correct count. File search speed will decrease. See also --follow-links. --stdin-name= Give a file name to use to determine the language for standard input. (Use - as the input name to receive source code via STDIN.) --strip-code= For each file processed, write to the current directory a version of the file which has blank and code lines, including code with (in-line comments) removed. The name of each stripped file is the original file name with . appended to it. It is written to the current directory unless --original-dir is on. --strip-comments= For each file processed, write to the current directory a version of the file which has blank and commented lines removed (in-line comments persist). The name of each stripped file is the original file name with . appended to it. It is written to the current directory unless --original-dir is on. --strip-str-comments Replace comment markers embedded in strings with 'xx'. This attempts to work around a limitation in Regexp::Common::Comment where comment markers embedded in strings are seen as actual comment markers and not strings, often resulting in a 'Complex regular subexpression recursion limit' warning and incorrect counts. There are two disadvantages to using this switch: 1/code count performance drops, and 2/code generated with --strip-comments will contain different strings where ever embedded comments are found. --sum-reports Input arguments are report files previously created with the --report-file option in plain format (eg. not JSON, YAML, XML, or SQL). Makes a cumulative set of results containing the sum of data from the individual report files. --timeout Ignore files which take more than seconds to process at any of the language's filter stages. The default maximum number of seconds spent on a filter stage is the number of lines in the file divided by one thousand. Setting to 0 allows unlimited time. See also --diff-timeout. --processes=NUM [Available only on systems with a recent version of the Parallel::ForkManager module. Not available on Windows.] Sets the maximum number of cores that cloc uses. The default value of 0 disables multiprocessing. --unix Override the operating system autodetection logic and run in UNIX mode. See also --windows, --show-os. --use-sloccount If SLOCCount is installed, use its compiled executables c_count, java_count, pascal_count, php_count, and xml_count instead of cloc's counters. SLOCCount's compiled counters are substantially faster than cloc's and may give a performance improvement when counting projects with large files. However, these cloc-specific features will not be available: --diff, --count-and-diff, --strip-code, --strip-comments, --unicode. --windows Override the operating system autodetection logic and run in Microsoft Windows mode. See also --unix, --show-os. ${BB}Filter Options${NN} --include-content= Only count files containing text that matches the given regular expression. --exclude-content= Exclude files containing text that matches the given regular expression. 
--exclude-dir=[,D2,] Exclude the given comma separated directories D1, D2, D3, et cetera, from being scanned. For example --exclude-dir=.cache,test will skip all files and subdirectories that have /.cache/ or /test/ as their parent directory. Directories named .bzr, .cvs, .hg, .git, .svn, and .snapshot are always excluded. This option only works with individual directory names so including file path separators is not allowed. Use --fullpath and --not-match-d= to supply a regex matching multiple subdirectories. --exclude-ext=[,[...]] Do not count files having the given file name extensions. --exclude-lang=[,L2[...]] Exclude the given comma separated languages L1, L2, L3, et cetera, from being counted. --exclude-list-file= Ignore files and/or directories whose names appear in . should have one file name per line. Only exact matches are ignored; relative path names will be resolved starting from the directory where cloc is invoked. See also --list-file, --config. --fullpath Modifies the behavior of --match-f, --not-match-f, and --not-match-d to include the file's path-- relative to the directory from which cloc is invoked--in the regex, not just the file's basename. (This does not expand each filename to include its fully qualified absolute path; instead, it uses as much of the path as is passed in to cloc.) --include-ext=[,ext2[...]] Count only languages having the given comma separated file extensions. Use --show-ext to see the recognized extensions. --include-lang=[,L2[...]] Count only the given comma separated, case- insensitive languages L1, L2, L3, et cetera. Use --show-lang to see the list of recognized languages. --match-d= Only count files in directories matching the Perl regex. For example --match-d='/(src|include)/' only counts files in directories containing /src/ or /include/. Unlike --not-match-d, --match-f, and --not-match-f, --match-d always anchors the regex to the directory from which cloc is invoked. --not-match-d= Count all files except those in directories matching the Perl regex. Only the trailing directory name is compared, for example, when counting in /usr/local/lib, only 'lib' is compared to the regex. Add --fullpath to compare parent directories, beginning from the directory where cloc is invoked, to the regex. Do not include file path separators at the beginning or end of the regex. This option may be repeated. --match-f= Only count files whose basenames match the Perl regex. For example --match-f='^[Ww]idget' only counts files that start with Widget or widget. Add --fullpath to include parent directories in the regex instead of just the basename. --not-match-f= Count all files except those whose basenames match the Perl regex. Add --fullpath to include parent directories in the regex instead of just the basename. This option may be repeated. --skip-archive= Ignore files that end with the given Perl regular expression. For example, if given --skip-archive='(zip|tar(\.(gz|Z|bz2|xz|7z))?)' the code will skip files that end with .zip, .tar, .tar.gz, .tar.Z, .tar.bz2, .tar.xz, and .tar.7z. --skip-win-hidden On Windows, ignore hidden files. ${BB}Debug Options${NN} --categorized= Save file sizes in bytes, identified languages and names of categorized files to . --counted= Save names of processed source files to . --diff-alignment= Write to a list of files and file pairs showing which files were added, removed, and/or compared during a run with --diff. This switch forces the --diff mode on. --explain= Print the filters used to remove comments for language and exit. 
In some cases the filters refer to Perl subroutines rather than regular expressions. An examination of the source code may be needed for further explanation. --help Print this usage information and exit. --found= Save names of every file found to . --ignored= Save names of ignored files and the reason they were ignored to . --print-filter-stages Print processed source code before and after each filter is applied. --show-ext[=] Print information about all known (or just the given) file extensions and exit. --show-lang[=] Print information about all known (or just the given) languages and exit. --show-os Print the value of the operating system mode and exit. See also --unix, --windows. -v[=] Verbose switch (optional numeric value). -verbose[=] Long form of -v. --version Print the version of this program and exit. --write-lang-def= Writes to the language processing filters then exits. Useful as a first step to creating custom language definitions. Note: languages which map to the same file extension will be excluded. (See also --force-lang-def, --read-lang-def). --write-lang-def-incl-dup= Same as --write-lang-def, but includes duplicated extensions. This generates a problematic language definition file because cloc will refuse to use it until duplicates are removed. ${BB}Output Options${NN} --3 Print third-generation language output. (This option can cause report summation to fail if some reports were produced with this option while others were produced without it.) --by-percent X Instead of comment and blank line counts, show these values as percentages based on the value of X in the denominator, where X is c meaning lines of code cm meaning lines of code + comments cb meaning lines of code + blanks cmb meaning lines of code + comments + blanks For example, if using method 'c' and your code has twice as many lines of comments as lines of code, the value in the comment column will be 200%. The code column remains a line count. --csv Write the results as comma separated values. --csv-delimiter= Use the character as the delimiter for comma separated files instead of ,. This switch forces --file-encoding= Write output files using the encoding instead of the default ASCII ( = 'UTF-7'). Examples: 'UTF-16', 'euc-kr', 'iso-8859-16'. Known encodings can be printed with perl -MEncode -e 'print join(\"\\n\", Encode->encodings(\":all\")), \"\\n\"' --fmt= Alternate text output format where is a number from 1 to 5, or -1 to -5. 'total lines' means the sum of code, comment, and blank lines. Negative values are the same as the positive values but retain, instead of deleting, the intermediate JSON file that is written. The JSON file name is randomly generated unless --out/--report-file is given. The formats are: 1: by language (same as cloc default output) 2: by language with an extra column for total lines 3: by file with language 4: by file with a total lines column 5: by file with language and a total lines column --hide-rate Do not show elapsed time, line processing rate, or file processing rates in the output header. This makes output deterministic. --json Write the results as JavaScript Object Notation (JSON) formatted output. --md Write the results as Markdown-formatted text. --out= Synonym for --report-file=. --progress-rate= Show progress update after every files are processed (default =100). Set to 0 to suppress progress output (useful when redirecting output to STDOUT). --quiet Suppress all information messages except for the final report. --report-file= Write the results to instead of STDOUT. 
   --summary-cutoff=X:N      Aggregate to 'Other' results having X lines
                             below N where X is one of
                               c   meaning lines of code
                               f   meaning files
                               m   meaning lines of comments
                               cm  meaning lines of code + comments
                             Appending a percent sign to N changes the
                             calculation from straight count to percentage.
                             Ignored with --diff or --by-file.
   --sql=                    Write results as SQL create and insert statements
                             which can be read by a database program such as
                             SQLite.  If  is -, output is sent to STDOUT.
   --sql-append              Append SQL insert statements to the file specified
                             by --sql and do not generate table creation
                             statements.  Only valid with the --sql option.
   --sql-project=            Use  as the project identifier for the current
                             run.  Only valid with the --sql option.
   --sql-style=

';
    print "<- html_header\n" if $opt_v > 2;
} # 1}}}
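# Illustrative sketch (an assumption, not taken from the call sites in this
# file): callers are expected to bracket their own report markup between the
# two helpers, roughly
#     my $page = html_header(...) . $rows_built_by_caller . html_end();
# where $rows_built_by_caller is a hypothetical variable holding the table
# rows produced elsewhere.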
sub html_end {                               # {{{1
return
'
';
} # 1}}}
sub die_unknown_lang {                       # {{{1
    my ($lang, $option_name) = @_;
    die "Unknown language '$lang' used with $option_name option. " .
        "The command\n  $script --show-lang\n" .
        "will print all recognized languages. Language names are " .
        "case sensitive.\n" ;
} # 1}}}
sub unicode_file {                           # {{{1
    my $file = shift @_;

    print "-> unicode_file($file)\n" if $opt_v > 2;
    return 0 if (get_size($file) > 2_000_000);
        # don't bother trying to test binary files bigger than 2 MB

    my $IN = open_file('<', $file, 1);
    if (!defined $IN) {
        warn "Unable to read $file; ignoring.\n";
        return 0;
    }
    my @lines = <$IN>;
    $IN->close;

    if (unicode_to_ascii( join('', @lines) )) {
        print "<- unicode_file()\n" if $opt_v > 2;
        return 1;
    } else {
        print "<- unicode_file()\n" if $opt_v > 2;
        return 0;
    }
} # 1}}}
sub unicode_to_ascii {                       # {{{1
    my $string = shift @_;
    # A trivial attempt to convert UTF-16 little or big endian
    # files into ASCII.  These files exhibit the following byte
    # sequence:
    #   byte   1:  255
    #   byte   2:  254
    #   byte   3:  ord of ASCII character
    #   byte   4:    0
    #   byte 3+i:  ord of ASCII character
    #   byte 4+i:    0
    # or
    #   byte   1:  255
    #   byte   2:  254
    #   byte   3:    0
    #   byte   4:  ord of ASCII character
    #   byte 3+i:    0
    #   byte 4+i:  ord of ASCII character
    #
    print "-> unicode_to_ascii()\n" if $opt_v > 2;

    my $length = length $string;
    #print "length=$length\n";
    return '' if $length <= 3;
    my @unicode = split(//, $string);

    # check the first 100 characters (= 200 bytes) for big or
    # little endian UTF-16 encoding
    my $max_peek     = $length < 200 ? $length : 200;
    my $max_for_pass = $length < 200 ? 0.9*$max_peek/2 : 90;

    my @view_1 = ();
    for (my $i = 2; $i < $max_peek; $i += 2) { push @view_1, $unicode[$i] }
    my @view_2 = ();
    for (my $i = 3; $i < $max_peek; $i += 2) { push @view_2, $unicode[$i] }

    my $points_1 = 0;
    foreach my $C (@view_1) {
        ++$points_1 if (32 <= ord($C) and ord($C) <= 127) or
                        ord($C) == 13 or ord($C) == 10 or ord($C) == 9;
    }
    my $points_2 = 0;
    foreach my $C (@view_2) {
        ++$points_2 if (32 <= ord($C) and ord($C) <= 127) or
                        ord($C) == 13 or ord($C) == 10 or ord($C) == 9;
    }
    #print "points 1: $points_1\n";
    #print "points 2: $points_2\n";
    #print "max_peek    : $max_peek\n";
    #print "max_for_pass: $max_for_pass\n";

    my $offset = undef;
    if    ($points_1 > $max_for_pass) { $offset = 2; }
    elsif ($points_2 > $max_for_pass) { $offset = 3; }
    else  {
        print "<- unicode_to_ascii() a p1=$points_1 p2=$points_2\n"
            if $opt_v > 2;
        return '';
    } # neither big or little endian UTF-16

    my @ascii = ();
    for (my $i = $offset; $i < $length; $i += 2) {
        # some compound characters are made of HT (9), LF (10), or CR (13)
        # True HT, LF, CR are followed by 00; only add those.
        my $L = $unicode[$i];
        if (ord($L) == 9 or ord($L) == 10 or ord($L) == 13) {
            my $companion;
            if ($points_1) {
                last if $i+1 >= $length;
                $companion = $unicode[$i+1];
            } else {
                $companion = $unicode[$i-1];
            }
            if (ord($companion) == 0) {
                push @ascii, $L;
            } else {
                push @ascii, " ";  # no clue what this letter is
            }
        } else {
            push @ascii, $L;
        }
    }
    print "<- unicode_to_ascii() b p1=$points_1 p2=$points_2\n" if $opt_v > 2;
    return join("", @ascii);
} # 1}}}
sub uncompress_archive_cmd {                 # {{{1
    my ($archive_file, ) = @_;

    # Wrap $archive_file in single or double quotes in the system
    # commands below to avoid filename chicanery (including
    # spaces in the names).
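    # Illustrative example (hypothetical file name, not taken from the code
    # below): an archive named
    #     my project (v2).tar.gz
    # should appear inside quotes in the generated shell command, e.g.
    #     gzip -dc 'my project (v2).tar.gz' | tar xf -
    # so the embedded spaces and parentheses are not interpreted by the shell.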
print "-> uncompress_archive_cmd($archive_file)\n" if $opt_v > 2; my $extract_cmd = ""; my $missing = ""; if ($opt_extract_with) { ( $extract_cmd = $opt_extract_with ) =~ s/>FILE -"; } elsif ($archive_file =~ /\.tar$/ and $ON_WINDOWS) { $extract_cmd = "tar -xf \"$archive_file\""; } elsif (($archive_file =~ /\.tar\.(gz|Z)$/ or $archive_file =~ /\.tgz$/ ) and !$ON_WINDOWS) { if (external_utility_exists("gzip --version")) { if (external_utility_exists("tar --version")) { $extract_cmd = "gzip -dc '$archive_file' | tar xf -"; } else { $missing = "tar"; } } else { $missing = "gzip"; } } elsif ($archive_file =~ /\.tar\.bz2$/ and !$ON_WINDOWS) { if (external_utility_exists("bzip2 --help")) { if (external_utility_exists("tar --version")) { $extract_cmd = "bzip2 -dc '$archive_file' | tar xf -"; } else { $missing = "tar"; } } else { $missing = "bzip2"; } } elsif ($archive_file =~ /\.tar\.xz$/ and !$ON_WINDOWS) { if (external_utility_exists("unxz --version")) { if (external_utility_exists("tar --version")) { $extract_cmd = "unxz -dc '$archive_file' | tar xf -"; } else { $missing = "tar"; } } else { $missing = "bzip2"; } } elsif ($archive_file =~ /\.tar$/ and !$ON_WINDOWS) { $extract_cmd = "tar xf '$archive_file'"; } elsif ($archive_file =~ /\.src\.rpm$/i and !$ON_WINDOWS) { if (external_utility_exists("cpio --version")) { if (external_utility_exists("rpm2cpio")) { $extract_cmd = "rpm2cpio '$archive_file' | cpio -i"; } else { $missing = "rpm2cpio"; } } else { $missing = "bzip2"; } } elsif ($archive_file =~ /\.(whl|zip)$/i and !$ON_WINDOWS) { if (external_utility_exists("unzip")) { $extract_cmd = "unzip -qq -d . '$archive_file'"; } else { $missing = "unzip"; } } elsif ($archive_file =~ /\.deb$/i and !$ON_WINDOWS) { # only useful if the .deb contains source code--most # .deb files just have compiled executables if (external_utility_exists("dpkg-deb")) { $extract_cmd = "dpkg-deb -x '$archive_file' ."; } else { $missing = "dpkg-deb"; } } elsif ($ON_WINDOWS and $archive_file =~ /\.(whl|zip)$/i) { # use unzip on Windows (comes with git-for-Windows) if (external_utility_exists("unzip")) { $extract_cmd = "unzip -qq -d . \"$archive_file\" "; } else { $missing = "unzip"; } } print "<- uncompress_archive_cmd\n" if $opt_v > 2; if ($missing) { die "Unable to expand $archive_file because external\n", "utility '$missing' is not available.\n", "Another possibility is to use the --extract-with option.\n"; } else { return $extract_cmd; } } # 1}}} sub read_list_file { # {{{1 my ($file, ) = @_; # reads filenames from a STDIN pipe if $file == "-" print "-> read_list_file($file)\n" if $opt_v > 2; my @entry = (); if ($file eq "-") { # read from a STDIN pipe my $IN; open($IN, $file); if (!defined $IN) { warn "Unable to read $file; ignoring.\n"; return (); } while (<$IN>) { next if /^\s*$/ or /^\s*#/; # skip empty or commented lines s/\cM$//; # DOS to Unix chomp; push @entry, $_; } $IN->close; } else { # read from an actual file foreach my $line (read_file($file)) { next if $line =~ /^\s*$/ or $line =~ /^\s*#/; $line =~ s/\cM$//; # DOS to Unix chomp $line; push @entry, $line; } } print "<- read_list_file\n" if $opt_v > 2; return @entry; } # 1}}} sub external_utility_exists { # {{{1 my $exe = shift @_; my $success = 0; if ($ON_WINDOWS) { $success = 1 unless system $exe . ' > nul'; } else { $success = 1 unless system $exe . ' >/dev/null 2>&1'; if (!$success) { $success = 1 unless system "which" . " $exe" . 
' >/dev/null 2>&1'; } } return $success; } # 1}}} sub write_xsl_file { # {{{1 print "-> write_xsl_file\n" if $opt_v > 2; my $XSL = # {{{2 ' CLOC Results

'; # 2}}} if ($opt_by_file) { $XSL .= #
{{{2 ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= '
File Blank Comment Code Language3rd Generation Equivalent Scale
Total

'; # 2}}} } if (!$opt_by_file or $opt_by_file_by_lang) { $XSL .= #
{{{2 ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= ' '; $XSL .= ' ' if $opt_3; $XSL .= '
Language Files Blank Comment CodeScale 3rd Generation Equivalent
Total
'; # 2}}} } $XSL.= <<'EO_XSL'; # {{{2
EO_XSL # 2}}} my $XSL_DIFF = <<'EO_DIFF_XSL'; # {{{2 CLOC Results

EO_DIFF_XSL # 2}}} if ($opt_by_file) { $XSL_DIFF.= <<'EO_DIFF_XSL'; # {{{2
Same
File Blank Comment Code
Modified
File Blank Comment Code
Added
File Blank Comment Code
Removed
File Blank Comment Code
EO_DIFF_XSL # 2}}} } if (!$opt_by_file or $opt_by_file_by_lang) { $XSL_DIFF.= <<'EO_DIFF_XSL'; # {{{2
Same
Language Files Blank Comment Code
Modified
Language Files Blank Comment Code
Added
Language Files Blank Comment Code
Removed
Language Files Blank Comment Code
EO_DIFF_XSL # 2}}} } $XSL_DIFF.= <<'EO_DIFF_XSL'; # {{{2
EO_DIFF_XSL # 2}}} if ($opt_diff) { write_file($CLOC_XSL, {}, ( $XSL_DIFF ) ); } else { write_file($CLOC_XSL, {}, ( $XSL ) ); } print "<- write_xsl_file\n" if $opt_v > 2; } # 1}}} sub normalize_file_names { # {{{1 print "-> normalize_file_names\n" if $opt_v > 2; my (@files, ) = @_; # Returns a hash of file names reduced to a canonical form # (fully qualified file names, all path separators changed to /, # Windows file names lowercased). Hash values are the original # file name. my %normalized = (); foreach my $F (@files) { my $F_norm = $F; if ($ON_WINDOWS) { $F_norm = lc $F_norm; # for case insensitive file name comparisons $F_norm =~ s{\\}{/}g; # Windows directory separators to Unix $F_norm =~ s{^\./}{}g; # remove leading ./ if (($F_norm !~ m{^/}) and ($F_norm !~ m{^\w:/})) { # looks like a relative path; prefix with cwd $F_norm = lc "$cwd/$F_norm"; } } else { $F_norm =~ s{^\./}{}g; # remove leading ./ if ($F_norm !~ m{^/}) { # looks like a relative path; prefix with cwd $F_norm = "$cwd/$F_norm"; } } # Remove trailing / so it does not interfere with further regex code # that does not expect it $F_norm =~ s{/+$}{}; $normalized{ $F_norm } = $F; } print "<- normalize_file_names\n" if $opt_v > 2; return %normalized; } # 1}}} sub combine_diffs { # {{{1 # subroutine by Andy (awalshe@sf.net) # https://sourceforge.net/tracker/?func=detail&aid=3261017&group_id=174787&atid=870625 my ($ra_files) = @_; print "-> combine_diffs\n" if $opt_v > 2; my $res = "$URL v $VERSION\n"; my $dl = '-'; my $width = 79; # columns are in this order my @cols = ('files', 'blank', 'comment', 'code'); my %HoH = (); foreach my $file (@{$ra_files}) { my $IN = open_file('<', $file, 1); if (!defined $IN) { warn "Unable to read $file; ignoring.\n"; next; } my $sec; while (<$IN>) { chomp; s/\cM$//; next if /^(http|Language|-----)/; if (/^[A-Za-z0-9]+/) { # section title $sec = $_; chomp($sec); $HoH{$sec} = () if ! exists $HoH{$sec}; next; } if (/^\s(same|modified|added|removed)/) { # calculated totals row my @ar = grep { $_ ne '' } split(/ /, $_); chomp(@ar); my $ttl = shift @ar; my $i = 0; foreach(@ar) { my $t = "${ttl}${dl}${cols[$i]}"; $HoH{$sec}{$t} = 0 if ! exists $HoH{$sec}{$t}; $HoH{$sec}{$t} += $_; $i++; } } } $IN->close; } # rows are in this order my @rows = ('same', 'modified', 'added', 'removed'); $res .= sprintf("%s\n", "-" x $width); $res .= sprintf("%-19s %14s %14s %14s %14s\n", 'Language', $cols[0], $cols[1], $cols[2], $cols[3]); $res .= sprintf("%s\n", "-" x $width); # no inputs? 
%HoH will be empty return $res unless %HoH; for my $sec ( keys %HoH ) { next if $sec =~ /SUM:/; next unless defined $HoH{$sec}; # eg, the header line $res .= "$sec\n"; foreach (@rows) { $res .= sprintf(" %-18s %14s %14s %14s %14s\n", $_, $HoH{$sec}{"${_}${dl}${cols[0]}"}, $HoH{$sec}{"${_}${dl}${cols[1]}"}, $HoH{$sec}{"${_}${dl}${cols[2]}"}, $HoH{$sec}{"${_}${dl}${cols[3]}"}); } } $res .= sprintf("%s\n", "-" x $width); my $sec = 'SUM:'; $res .= "$sec\n"; foreach (@rows) { $res .= sprintf(" %-18s %14s %14s %14s %14s\n", $_, $HoH{$sec}{"${_}${dl}${cols[0]}"}, $HoH{$sec}{"${_}${dl}${cols[1]}"}, $HoH{$sec}{"${_}${dl}${cols[2]}"}, $HoH{$sec}{"${_}${dl}${cols[3]}"}); } $res .= sprintf("%s\n", "-" x $width); print "<- combine_diffs\n" if $opt_v > 2; return $res; } # 1}}} sub combine_csv_diffs { # {{{1 my ($delimiter, $ra_files) = @_; print "-> combine_csv_diffs\n" if $opt_v > 2; my %sum = (); # sum{ language } = array of 17 values foreach my $file (@{$ra_files}) { my $IN = open_file('<', $file, 1); if (!defined $IN) { warn "Unable to read $file; ignoring.\n"; next; } my $sec; while (<$IN>) { next if /^Language${delimiter}\s==\sfiles${delimiter}/; chomp; my @words = split(/$delimiter/); my $n_col = scalar(@words); if ($n_col != 18) { warn "combine_csv_diffs(): Parse failure line $. of $file\n"; warn "Expected 18 columns, got $n_col\n"; die; } my $Lang = $words[0]; my @count = map { int($_) } @words[1..16]; if (defined $sum{$Lang}) { for (my $i = 0; $i < 16; $i++) { $sum{$Lang}[$i] += $count[$i]; } } else { @{$sum{$Lang}} = @count; } } $IN->close; } my @header = ("Language", "== files", "!= files", "+ files", "- files", "== blank", "!= blank", "+ blank", "- blank", "== comment", "!= comment", "+ comment", "- comment", "== code", "!= code", "+ code", "- code", "$URL v $VERSION" ); my $res = join("$delimiter ", @header) . "$delimiter\n"; foreach my $Lang (sort keys %sum) { $res .= $Lang . "$delimiter "; for (my $i = 0; $i < 16; $i++) { $res .= $sum{$Lang}[$i] . "$delimiter "; } $res .= "\n"; } print "<- combine_csv_diffs\n" if $opt_v > 2; return $res; } # 1}}} sub get_time { # {{{1 if ($HAVE_Time_HiRes) { return Time::HiRes::time(); } else { return time(); } } # 1}}} sub really_is_D { # {{{1 # Ref bug 131, files ending with .d could be init.d scripts # instead of D language source files. my ($file , # in $rh_Err , # in hash of error codes $raa_errors , # out ) = @_; print "-> really_is_D($file)\n" if $opt_v > 2; my ($possible_script, $L) = peek_at_first_line($file, $rh_Err, $raa_errors); print "<- really_is_D($file)\n" if $opt_v > 2; return $possible_script; # null string if D, otherwise a language } # 1}}} sub no_autogen_files { # {{{1 # ref https://github.com/AlDanial/cloc/issues/151 my ($print,) = @_; print "-> no_autogen($print)\n" if $opt_v > 2; # These sometimes created manually? 
# acinclude.m4 # configure.ac # Makefile.am my @files = qw ( aclocal.m4 announce-gen autogen.sh bootstrap compile config.guess config.h.in config.rpath config.status config.sub configure configure.in depcomp gendocs.sh gitlog-to-changelog git-version-gen gnupload gnu-web-doc-update install-sh libtool libtool.m4 link-warning.h ltmain.sh lt~obsolete.m4 ltoptions.m4 ltsugar.m4 ltversion.in ltversion.m4 Makefile.in mdate-sh missing mkinstalldirs test-driver texinfo.tex update-copyright useless-if-before-free vc-list-files ylwrap ); if ($print) { printf "cloc will ignore these %d files with --no-autogen:\n", scalar @files; foreach my $F (@files) { print " $F\n"; } print "Additionally, Go files with '// Code generated by .* DO NOT EDIT.'\n"; print "on the first line are ignored.\n"; } print "<- no_autogen()\n" if $opt_v > 2; return @files; } # 1}}} sub load_from_config_file { # {{{1 # Supports all options except --config itself which would # be pointless. my ($config_file, $rs_by_file , $rs_by_file_by_lang , $rs_categorized , $rs_counted , $rs_include_ext , $rs_include_lang , $rs_include_content , $rs_exclude_content , $rs_exclude_lang , $rs_exclude_dir , $rs_exclude_list_file , $rs_explain , $rs_extract_with , $rs_found , $rs_count_diff , $rs_diff_list_files , $rs_diff , $rs_diff_alignment , $rs_diff_timeout , $rs_timeout , $rs_html , $rs_ignored , $rs_quiet , $rs_force_lang_def , $rs_read_lang_def , $rs_show_ext , $rs_show_lang , $rs_progress_rate , $rs_print_filter_stages , $rs_report_file , $ra_script_lang , $rs_sdir , $rs_skip_uniqueness , $rs_strip_code , $rs_strip_comments , $rs_original_dir , $rs_sum_reports , $rs_hide_rate , $rs_processes , $rs_unicode , $rs_3 , $rs_v , $rs_vcs , $rs_version , $rs_write_lang_def , $rs_write_lang_def_incl_dup, $rs_xml , $rs_xsl , $ra_force_lang , $rs_lang_no_ext , $rs_yaml , $rs_csv , $rs_csv_delimiter , $rs_json , $rs_md , $rs_fullpath , $rs_match_f , $ra_not_match_f , $rs_match_d , $ra_not_match_d , $rs_list_file , $rs_help , $rs_skip_win_hidden , $rs_read_binary_files , $rs_sql , $rs_sql_project , $rs_sql_append , $rs_sql_style , $rs_inline , $rs_exclude_ext , $rs_ignore_whitespace , $rs_ignore_case , $rs_ignore_case_ext , $rs_follow_links , $rs_autoconf , $rs_sum_one , $rs_by_percent , $rs_stdin_name , $rs_force_on_windows , $rs_force_on_unix , $rs_show_os , $rs_skip_archive , $rs_max_file_size , $rs_use_sloccount , $rs_no_autogen , $rs_force_git , $rs_strip_str_comments , $rs_file_encoding , $rs_docstring_as_code , $rs_stat , ) = @_; # look for runtime configuration file in # $ENV{'HOME'}/.config/cloc/options.txt -> POSIX # $ENV{'APPDATA'} . 
'cloc' print "-> load_from_config_file($config_file)\n" if $opt_v and $opt_v > 2; if (!is_file($config_file)) { print "<- load_from_config_file() (no such file: $config_file)\n" if $opt_v and $opt_v > 2; return; } elsif (!can_read($config_file)) { print "<- load_from_config_file() (unable to read $config_file)\n" if $opt_v and $opt_v > 2; return; } print "Reading options from $config_file.\n" if defined $opt_v; my $has_force_lang = @{$ra_force_lang}; my $has_script_lang = @{$ra_script_lang}; my @lines = read_file($config_file); foreach (@lines) { next if /^\s*$/ or /^\s*#/; s/\s*--//; s/^\s+//; if (!defined ${$rs_by_file} and /^(by_file|by-file)/) { ${$rs_by_file} = 1; } elsif (!defined ${$rs_by_file_by_lang} and /^(by_file_by_lang|by-file-by-lang)/) { ${$rs_by_file_by_lang} = 1; } elsif (!defined ${$rs_categorized} and /^categorized(=|\s+)(.*?)$/) { ${$rs_categorized} = $2; } elsif (!defined ${$rs_counted} and /^counted(=|\s+)(.*?)$/) { ${$rs_counted} = $2; } elsif (!defined ${$rs_include_ext} and /^(?:include_ext|include-ext)(=|\s+)(.*?)$/) { ${$rs_include_ext} = $2; } elsif (!defined ${$rs_include_lang} and /^(?:include_lang|include-lang)(=|\s+)(.*?)$/) { ${$rs_include_lang} = $2; } elsif (!defined ${$rs_include_content} and /^(?:include_content|include-content)(=|\s+)(.*?)$/) { ${$rs_include_content} = $2; } elsif (!defined ${$rs_exclude_content} and /^(?:exclude_content|exclude-content)(=|\s+)(.*?)$/) { ${$rs_exclude_content} = $2; } elsif (!defined ${$rs_exclude_lang} and /^(?:exclude_lang|exclude-lang)(=|\s+)(.*?)$/) { ${$rs_exclude_lang} = $2; } elsif (!defined ${$rs_exclude_dir} and /^(?:exclude_dir|exclude-dir)(=|\s+)(.*?)$/) { ${$rs_exclude_dir} = $2; } elsif (!defined ${$rs_explain} and /^explain(=|\s+)(.*?)$/) { ${$rs_explain} = $2; } elsif (!defined ${$rs_extract_with} and /^(?:extract_with|extract-with)(=|\s+)(.*?)$/) { ${$rs_extract_with} = $2; } elsif (!defined ${$rs_found} and /^found(=|\s+)(.*?)$/) { ${$rs_found} = $2; } elsif (!defined ${$rs_count_diff} and /^(count_and_diff|count-and-diff)/) { ${$rs_count_diff} = 1; } elsif (!defined ${$rs_diff_list_files} and /^(diff_list_files|diff-list-files)/) { ${$rs_diff_list_files} = 1; } elsif (!defined ${$rs_diff} and /^diff/) { ${$rs_diff} = 1; } elsif (!defined ${$rs_diff_alignment} and /^(?:diff-alignment|diff_alignment)(=|\s+)(.*?)$/) { ${$rs_diff_alignment} = $2; } elsif (!defined ${$rs_diff_timeout} and /^(?:diff-timeout|diff_timeout)(=|\s+)i/) { ${$rs_diff_timeout} = $1; } elsif (!defined ${$rs_timeout} and /^timeout(=|\s+)i/) { ${$rs_timeout} = $1; } elsif (!defined ${$rs_html} and /^html/) { ${$rs_html} = 1; } elsif (!defined ${$rs_ignored} and /^ignored(=|\s+)(.*?)$/) { ${$rs_ignored} = $2; } elsif (!defined ${$rs_quiet} and /^quiet/) { ${$rs_quiet} = 1; } elsif (!defined ${$rs_force_lang_def} and /^(?:force_lang_def|force-lang-def)(=|\s+)(.*?)$/) { ${$rs_force_lang_def} = $2; } elsif (!defined ${$rs_read_lang_def} and /^(?:read_lang_def|read-lang-def)(=|\s+)(.*?)$/) { ${$rs_read_lang_def} = $2; } elsif (!defined ${$rs_progress_rate} and /^(?:progress_rate|progress-rate)(=|\s+)(\d+)/) { ${$rs_progress_rate} = $2; } elsif (!defined ${$rs_print_filter_stages} and /^(print_filter_stages|print-filter-stages)/) { ${$rs_print_filter_stages}= 1; } elsif (!defined ${$rs_report_file} and /^(?:report_file|report-file)(=|\s+)(.*?)$/) { ${$rs_report_file} = $2; } elsif (!defined ${$rs_report_file} and /^out(=|\s+)(.*?)$/) { ${$rs_report_file} = $2; } elsif (!defined ${$rs_sdir} and /^sdir(=|\s+)(.*?)$/) { ${$rs_sdir} = $2; } 
elsif (!defined ${$rs_skip_uniqueness} and /^(skip_uniqueness|skip-uniqueness)/) { ${$rs_skip_uniqueness} = 1; } elsif (!defined ${$rs_strip_code} and /^(?:strip_code|strip-code)(=|\s+)(.*?)$/) { ${$rs_strip_code} = $2; } elsif (!defined ${$rs_strip_comments} and /^(?:strip_comments|strip-comments)(=|\s+)(.*?)$/) { ${$rs_strip_comments} = $2; } elsif (!defined ${$rs_original_dir} and /^(original_dir|original-dir)/) { ${$rs_original_dir} = 1; } elsif (!defined ${$rs_sum_reports} and /^(sum_reports|sum-reports)/) { ${$rs_sum_reports} = 1; } elsif (!defined ${$rs_hide_rate} and /^(hid_rate|hide-rate)/) { ${$rs_hide_rate} = 1; } elsif (!defined ${$rs_processes} and /^processes(=|\s+)(\d+)/) { ${$rs_processes} = $2; } elsif (!defined ${$rs_unicode} and /^unicode/) { ${$rs_unicode} = 1; } elsif (!defined ${$rs_3} and /^3/) { ${$rs_3} = 1; } elsif (!defined ${$rs_vcs} and /^vcs(=|\s+)(\S+)/) { ${$rs_vcs} = $2; } elsif (!defined ${$rs_version} and /^version/) { ${$rs_version} = 1; } elsif (!defined ${$rs_write_lang_def} and /^(?:write_lang_def|write-lang-def)(=|\s+)(.*?)$/) { ${$rs_write_lang_def} = $2; } elsif (!defined ${$rs_write_lang_def_incl_dup} and /^(?:write_lang_def_incl_dup|write-lang-def-incl-dup)(=|\s+)(.*?)$/) { ${$rs_write_lang_def_incl_dup} = $2; } elsif (!defined ${$rs_xml} and /^xml/) { ${$rs_xml} = 1; } elsif (!defined ${$rs_xsl} and /^xsl(=|\s+)(.*?)$/) { ${$rs_xsl} = $2; } elsif (!defined ${$rs_lang_no_ext} and /^(?:lang_no_ext|lang-no-ext)(=|\s+)(.*?)$/) { ${$rs_lang_no_ext} = $2; } elsif (!defined ${$rs_yaml} and /^yaml/) { ${$rs_yaml} = 1; } elsif (!defined ${$rs_csv} and /^csv/) { ${$rs_csv} = 1; } elsif (!defined ${$rs_csv_delimiter} and /^(?:csv_delimiter|csv-delimiter)(=|\s+)(.*?)$/) { ${$rs_csv_delimiter} = $2; } elsif (!defined ${$rs_json} and /^json/) { ${$rs_json} = 1; } elsif (!defined ${$rs_md} and /^md/) { ${$rs_md} = 1; } elsif (!defined ${$rs_fullpath} and /^fullpath/) { ${$rs_fullpath} = 1; } elsif (!defined ${$rs_match_f} and /^(?:match_f|match-f)(=|\s+)(.*?)$/) { ${$rs_match_f} = $2; } elsif (! @{$ra_not_match_f} and /^(?:not_match_f|not-match-f)(=|\s+)(.*?)$/) { push @{$ra_not_match_f} , $2; } elsif (!defined ${$rs_match_d} and /^(?:match_d|match-d)(=|\s+)(.*?)$/) { ${$rs_match_d} = $2; } elsif (! 
@{$ra_not_match_d} and /^(?:not_match_d|not-match-d)(=|\s+)(.*?)$/) { push @{$ra_not_match_d} , $2; } elsif (!defined ${$rs_list_file} and /^(?:list_file|list-file)(=|\s+)(.*?)$/) { ${$rs_list_file} = $2; } elsif (!defined ${$rs_help} and /^help/) { ${$rs_help} = 1; } elsif (!defined ${$rs_skip_win_hidden} and /^(skip_win_hidden|skip-win-hidden)/) { ${$rs_skip_win_hidden} = 1; } elsif (!defined ${$rs_read_binary_files} and /^(read_binary_files|read-binary-files)/) { ${$rs_read_binary_files} = 1; } elsif (!defined ${$rs_sql} and /^sql(=|\s+)(.*?)$/) { ${$rs_sql} = $2; } elsif (!defined ${$rs_sql_project} and /^(?:sql_project|sql-project)(=|\s+)(.*?)$/) { ${$rs_sql_project} = $2; } elsif (!defined ${$rs_sql_append} and /^(sql_append|sql-append)/) { ${$rs_sql_append} = 1; } elsif (!defined ${$rs_sql_style} and /^(?:sql_style|sql-style)(=|\s+)(.*?)$/) { ${$rs_sql_style} = $2; } elsif (!defined ${$rs_inline} and /^inline/) { ${$rs_inline} = 1; } elsif (!defined ${$rs_exclude_ext} and /^(?:exclude_ext|exclude-ext)(=|\s+)(.*?)$/) { ${$rs_exclude_ext} = $2; } elsif (!defined ${$rs_ignore_whitespace} and /^(ignore_whitespace|ignore-whitespace)/) { ${$rs_ignore_whitespace} = 1; } elsif (!defined ${$rs_ignore_case_ext} and /^(ignore_case_ext|ignore-case-ext)/) { ${$rs_ignore_case_ext} = 1; } elsif (!defined ${$rs_ignore_case} and /^(ignore_case|ignore-case)/) { ${$rs_ignore_case} = 1; } elsif (!defined ${$rs_follow_links} and /^(follow_links|follow-links)/) { ${$rs_follow_links} = 1; } elsif (!defined ${$rs_autoconf} and /^autoconf/) { ${$rs_autoconf} = 1; } elsif (!defined ${$rs_sum_one} and /^(sum_one|sum-one)/) { ${$rs_sum_one} = 1; } elsif (!defined ${$rs_by_percent} and /^(?:by_percent|by-percent)(=|\s+)(.*?)$/) { ${$rs_by_percent} = $2; } elsif (!defined ${$rs_stdin_name} and /^(?:stdin_name|stdin-name)(=|\s+)(.*?)$/) { ${$rs_stdin_name} = $2; } elsif (!defined ${$rs_force_on_windows} and /^windows/) { ${$rs_force_on_windows} = 1; } elsif (!defined ${$rs_force_on_unix} and /^unix/) { ${$rs_force_on_unix} = 1; } elsif (!defined ${$rs_show_os} and /^(show_os|show-os)/) { ${$rs_show_os} = 1; } elsif (!defined ${$rs_skip_archive} and /^(?:skip_archive|skip-archive)(=|\s+)(.*?)$/) { ${$rs_skip_archive} = $2; } elsif (!defined ${$rs_max_file_size} and /^(?:max_file_size|max-file-size)(=|\s+)(\d+)/) { ${$rs_max_file_size} = $2; } elsif (!defined ${$rs_use_sloccount} and /^(use_sloccount|use-sloccount)/) { ${$rs_use_sloccount} = 1; } elsif (!defined ${$rs_no_autogen} and /^(no_autogen|no-autogen)/) { ${$rs_no_autogen} = 1; } elsif (!defined ${$rs_force_git} and /^git/) { ${$rs_force_git} = 1; } elsif (!defined ${$rs_exclude_list_file} and /^(?:exclude_list_file|exclude-list-file)(=|\s+)(.*?)$/) { ${$rs_exclude_list_file} = $2; } elsif (!defined ${$rs_v} and /^(verbose|v)((=|\s+)(\d+))?/) { if (!defined $4) { ${$rs_v} = 0; } else { ${$rs_v} = $4; } } elsif (!$has_script_lang and /^(?:script_lang|script-lang)(=|\s+)(.*?)$/) { push @{$ra_script_lang} , $2; } elsif (!$has_force_lang and /^(?:force_lang|force-lang)(=|\s+)(.*?)$/) { push @{$ra_force_lang} , $2; } elsif (!defined ${$rs_show_ext} and /^(show_ext|show-ext)((=|\s+)(.*))?$/) { if (!defined $4) { ${$rs_show_ext} = 0; } else { ${$rs_show_ext} = $4; } } elsif (!defined ${$rs_show_lang} and /^(show_lang|show-lang)((=|\s+)(.*))?s/){ if (!defined $4) { ${$rs_show_lang} = 0; } else { ${$rs_show_lang} = $4; } } elsif (!defined ${$rs_strip_str_comments} and /^(strip_str_comments|strip-str-comments)/) { ${$rs_strip_str_comments} = 1; } elsif (!defined 
${$rs_file_encoding} and /^(?:file_encoding|file-encoding)(=|\s+)(\S+)/) { ${$rs_file_encoding} = $2; } elsif (!defined ${$rs_docstring_as_code} and /^(docstring_as_code|docstring-as-code)/) { ${$rs_docstring_as_code} = 1; } elsif (!defined ${$rs_stat} and /stat/) { ${$rs_stat} = 1; } } } # 1}}} sub trick_pp_packer_encode { # {{{1 use Encode; # PAR::Packer gives 'Unknown PerlIO layer "encoding"' unless it is # forced into using this module. my ($OUT, $JunkFile) = tempfile(UNLINK => 1); # delete on exit open($OUT, "> :encoding(utf8)", $JunkFile); close($OUT); } # 1}}} sub really_is_smarty { # {{{1 # Given filename, returns TRUE if its contents look like Smarty template my ($filename, ) = @_; print "-> really_is_smarty($filename)\n" if $opt_v > 2; my @lines = read_file($filename); my $points = 0; foreach my $L (@lines) { if (($L =~ /\{(if|include)\s/) or ($L =~ /\{\/if\}/) or ($L =~ /(\{\*|\*\})/) or ($L =~ /\{\$\w/)) { ++$points; } last if $points >= 2; } print "<- really_is_smarty(points=$points)\n" if $opt_v > 2; return $points >= 2; } # 1}}} sub check_alternate_config_files { # {{{1 my ($list_file, $exclude_list_file, $read_lang_def, $force_lang_def, $diff_list_file, ) = @_; my $found_it = ""; foreach my $file ($list_file, $exclude_list_file, $read_lang_def, $force_lang_def, $diff_list_file ) { next unless defined $file; my $dir = dirname $file; next unless can_read($dir) and is_dir($dir); my $bn = basename $config_file; if (can_read("$dir/$bn")) { $found_it = "$dir/$bn"; print "Using configuration file $found_it\n" if $opt_v; last; } } return $found_it; } # 1}}} sub write_null_results { # {{{ my ($json, $xml, $report_file,) = @_; print "-> write_null_results\n" if $opt_v > 2; if ((defined $json) or (defined $xml)) { my $line = ""; if (defined $json) { $line = "{}"; } else { $line = ''; } if (defined $report_file) { open OUT, ">$report_file" or die "Cannot write to $report_file $!\n"; print OUT "$line\n"; close OUT; } else { print "$line\n"; } } print "<- write_null_results\n" if $opt_v > 2; } # }}} sub glob2regex { # {{{ # convert simple xpath-style glob pattern to a regex my $globstr = shift; my $re = $globstr; $re =~ s{^["']}{}; $re =~ s{^\.\/}{}; $re =~ s{["']$}{}; $re =~ s{\.}{\\.}g; $re =~ s{\*\*}{\cx}g; # ctrl x = .*? $re =~ s{\*}{\cy}g; # ctrl y = [^/]* $re =~ s{\cx}{.*?}g; $re =~ s{\cy}{[^/]*}g; return '^' . $re . '$'; } # }}} sub load_json { # {{{1 # # Load a cloc-generated JSON file into %contents # $contents{filename}{blank|comment|code|language} = value # then print in a variety of formats. # my ($file, ) = @_; my %contents = (); my $heading = undef; open IN, $file or die "failed load_json($file)"; while () { if (/^{?"(.*?)"/) { $heading = $1; } else { if (/^\s+"(.*?)"\s*:\s+(\d+(\.\d+)?)\b/) { # numeric value $contents{$heading}{$1} = $2; } elsif (/^\s+"(.*?)"\s*:\s+"(.*?)"/) { $contents{$heading}{$1} = $2; } } } close IN; my $url = $contents{'header'}{'cloc_url'}; my $ver = $contents{'header'}{'cloc_version'}; my $sec = $contents{'header'}{'elapsed_seconds'}; my $n_file = $contents{'header'}{'n_files'}; my $n_line = $contents{'header'}{'n_lines'}; $sec = $sec == 0 ? 
1.0e-3 : $sec; my $header = sprintf "%s v %s T=%.2f s (%.1f files/s, %.1f lines/s)", $url, $ver, $sec, $n_file/$sec, $n_line/$sec; delete $contents{'header'}; delete $contents{'SUM'}; my @file_list = (sort { $contents{$b}{'code'} <=> $contents{$a}{'code'} } keys %contents ); #die Dumper(\%contents); # Determine column widths for output my $file_len = 0; my $lang_len = 0; foreach my $file (keys %contents) { my $flen = length $file; my $llen = length $contents{$file}{'language'}; $file_len = $file_len > $flen ? $file_len : $flen; $lang_len = $lang_len > $llen ? $lang_len : $llen; } return $file_len, $lang_len, $header, %contents; } # 1}}} sub print_format_n { # {{{1 # by file with # format 1 : Language | files | blank | comment | code # format 2 : Language | files | blank | comment | code | total # format 3 : File | Language | blank | comment | code # format 4 : File | blank | comment | code | total # format 5 : File | Language | blank | comment | code | total my ($format, $file_len, $lang_len, $header, %contents) = @_; my %str_fmt = ( 1 => sprintf("%%-%ds %%7s %%7s %%7s %%7s\n", $lang_len), 2 => sprintf("%%-%ds %%7s %%7s %%7s %%7s %%7s\n", $lang_len), 3 => sprintf("%%-%ds %%-%ds %%7s %%7s %%7s\n", $file_len, $lang_len), 4 => sprintf("%%-%ds %%7s %%7s %%7s %%7s\n", $file_len), 5 => sprintf("%%-%ds %%-%ds %%7s %%7s %%7s %%7s\n", $file_len, $lang_len), ); my %val_fmt = ( 1 => sprintf("%%-%ds %%7d %%7d %%7d %%7d\n", $lang_len), 2 => sprintf("%%-%ds %%7d %%7d %%7d %%7d %%7d\n", $lang_len), 3 => sprintf("%%-%ds %%-%ds %%7d %%7d %%7d\n", $file_len, $lang_len), 4 => sprintf("%%-%ds %%7d %%7d %%7d %%7d\n", $file_len), 5 => sprintf("%%-%ds %%-%ds %%7d %%7d %%7d %%7d\n", $file_len, $lang_len), ); my %language = (); foreach my $file (keys %contents) { my $lang = $contents{$file}{'language'}; $language{$lang}{'files'} += 1; foreach my $category ('blank', 'comment', 'code',) { $language{$lang}{$category} += $contents{$file}{$category}; $language{$lang}{'total'} += $contents{$file}{$category}; } } my @file_list = (sort { $contents{$b}{'code'} <=> $contents{$a}{'code'} } keys %contents ); my @lang_list = (sort { $language{$b}{'code'} <=> $language{$a}{'code'} } keys %language ); my %hyphens = ( 1 => "-" x ($lang_len + 4*9), 2 => "-" x ($lang_len + 5*9), 3 => "-" x ($lang_len + $file_len + 2 + 3*9), 4 => "-" x ($file_len + 4*9), 5 => "-" x ($lang_len + $file_len + 2 + 4*9), ); my %col_headings = ( 1 => ["Language", "files", "blank", "comment", "code"], 2 => ["Language", "files", "blank", "comment", "code", "Total"], 3 => ["File", "Language", "blank", "comment", "code"], 4 => ["File", "blank", "comment", "code", "Total"], 5 => ["File", "Language", "blank", "comment", "code", "Total"], ); print "$header\n"; print "$hyphens{$format}\n"; printf $str_fmt{$format}, @{$col_headings{$format}}; print "$hyphens{$format}\n"; my ($n_files, $n_blank, $n_comment, $n_code, $n_total) = (0, 0, 0, 0, 0); my @out; if ($format < 3) { # by language foreach my $lang (@lang_list) { my ($nF, $nB, $nCm, $nCo) = ($language{$lang}{'files'}, $language{$lang}{'blank'}, $language{$lang}{'comment'}, $language{$lang}{'code'}); if ($format == 1) { @out = ($lang, $nF, $nB, $nCm, $nCo); } else { @out = ($lang, $nF, $nB, $nCm, $nCo, $nB + $nCm + $nCo); } printf $val_fmt{$format}, @out; $n_files += $nF; $n_blank += $nB; $n_comment += $nCm; $n_code += $nCo; $n_total += $nB + $nCm + $nCo; } } else { # by file foreach my $file (@file_list) { my ($nB, $nCm, $nCo) = ($contents{$file}{'blank'}, $contents{$file}{'comment'}, $contents{$file}{'code'}); 
            my $lang = $contents{$file}{'language'};
            if      ($format == 3) {
                @out = ($file, $lang, $nB, $nCm, $nCo);
            } elsif ($format == 4) {
                @out = ($file,        $nB, $nCm, $nCo, $nB + $nCm + $nCo);
            } else {
                @out = ($file, $lang, $nB, $nCm, $nCo, $nB + $nCm + $nCo);
            }
            printf $val_fmt{$format}, @out;
            $n_blank   += $nB;
            $n_comment += $nCm;
            $n_code    += $nCo;
            $n_total   += $nB + $nCm + $nCo;
        }
    }
    print "$hyphens{$format}\n";
    if (scalar @file_list > 1) {
        if      ($format == 1) {
            @out = ( "SUM", $n_files, $n_blank, $n_comment, $n_code );
        } elsif ($format == 2) {
            @out = ( "SUM", $n_files, $n_blank, $n_comment, $n_code, $n_total );
        } elsif ($format == 3) {
            @out = ( "SUM", " ",      $n_blank, $n_comment, $n_code );
        } elsif ($format == 4) {
            @out = ( "SUM",           $n_blank, $n_comment, $n_code, $n_total );
        } else {
            @out = ( "SUM", " ",      $n_blank, $n_comment, $n_code, $n_total );
        }
        printf $val_fmt{$format}, @out;
        print "$hyphens{$format}\n";
    }
} # 1}}}
# really_is_pascal, really_is_incpascal, really_is_php from SLOCCount
my %php_files = ();   # really_is_php()
sub really_is_pascal {                       # {{{1
    # Given filename, returns TRUE if its contents really are Pascal.

    # This isn't as obvious as it seems.
    # Many ".p" files are Perl files
    # (such as /usr/src/redhat/BUILD/ispell-3.1/dicts/czech/glob.p),
    # others are C extractions
    # (such as /usr/src/redhat/BUILD/linux/include/linux/umsdos_fs.p
    # and some files in linuxconf).
    # However, test files in "p2c" really are Pascal, for example.

    # Note that /usr/src/redhat/BUILD/ucd-snmp-4.1.1/ov/bitmaps/UCD.20.p
    # is actually C code.  The heuristics determine that it's not Pascal,
    # but because it ends in ".p" it's not counted as C code either.
    # I believe this is actually correct behavior, because frankly it
    # looks like it's automatically generated (it's a bitmap expressed
    # as code).  Rather than guess otherwise, we don't include it in a
    # list of source files.  Let's face it, someone who creates C files
    # ending in ".p" and expects them to be counted by default as C files
    # in SLOCCount needs their head examined.  I suggest examining their
    # head with a sucker rod (see syslogd(8) for more on sucker rods).

    # This heuristic counts as Pascal files such as
    #   /usr/src/redhat/BUILD/teTeX-1.0/texk/web2c/tangleboot.p
    # which is hand-generated.  We don't count woven documents now anyway,
    # so this is justifiable.

    my $filename = shift;
    chomp($filename);

    # The heuristic is as follows: it's Pascal _IF_ it has all of the
    # following (ignoring {...} and (*...*) comments):
    # 1. "^..program NAME" or "^..unit NAME",
    # 2. "procedure", "function", "^..interface", or "^..implementation",
    # 3. a "begin", and
    # 4. it ends with "end.",
    #
    # Or it has all of the following:
    # 1. "^..module NAME" and
    # 2. it ends with "end.".
    #
    # Or it has all of the following:
    # 1. "^..program NAME",
    # 2. a "begin", and
    # 3. it ends with "end.".
    #
    # The "end." requirements in particular filter out non-Pascal.
    #
    # Note (jgb): this does not detect Pascal main files in fpc, like
    # fpc-1.0.4/api/test/testterminfo.pas, which does not have "program"
    # in it.

    my $is_pascal = 0;      # Value to determine.

    my $has_program = 0;
    my $has_unit = 0;
    my $has_module = 0;
    my $has_procedure_or_function = 0;
    my $found_begin = 0;
    my $found_terminating_end = 0;
    my $has_begin = 0;

    my $PASCAL_FILE = open_file('<', $filename, 0);
    die "Can't open $filename to determine if it's Pascal.\n"
        if !defined $PASCAL_FILE;
    while (<$PASCAL_FILE>) {
        s/\{.*?\}//g;       # Ignore {...} comments on this line; imperfect, but effective.
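        # This substitution and the one below strip Pascal's two comment
        # forms ({...} and (*...*)) so that commented-out keywords
        # (e.g. "{ program demo }") do not trigger false positives.
        # Comments spanning multiple lines are not handled, hence
        # "imperfect, but effective".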
        s/\(\*.*?\*\)//g;   # Ignore (*...*) comments on this line; imperfect, but effective.
        if (m/\bprogram\s+[A-Za-z]/i)      {$has_program = 1;}
        if (m/\bunit\s+[A-Za-z]/i)         {$has_unit = 1;}
        if (m/\bmodule\s+[A-Za-z]/i)       {$has_module = 1;}
        if (m/\bprocedure\b/i)             {$has_procedure_or_function = 1;}
        if (m/\bfunction\b/i)              {$has_procedure_or_function = 1;}
        if (m/^\s*interface\s+/i)          {$has_procedure_or_function = 1;}
        if (m/^\s*implementation\s+/i)     {$has_procedure_or_function = 1;}
        if (m/\bbegin\b/i)                 {$has_begin = 1;}
        # Originally I said:
        #   "This heuristic fails if there are multi-line comments after
        #    'end.'; I haven't seen that in real Pascal programs."
        # But jgb found a good number of them in Debian, especially in
        # fpc (at the end of a lot of files there is a multi-line comment
        # with the changelog for the file).
        # Therefore, assume Pascal if "end." appears anywhere in the file.
        if (m/end\.\s*$/i)                 {$found_terminating_end = 1;}
        # elsif (m/\S/)                    {$found_terminating_end = 0;}
    }
    close($PASCAL_FILE);

    # Okay, we've examined the entire file looking for clues;
    # let's use those clues to determine if it's really Pascal:
    if ( ( ($has_unit || $has_program) && $has_procedure_or_function &&
           $has_begin && $found_terminating_end ) ||
         ( $has_module && $found_terminating_end ) ||
         ( $has_program && $has_begin && $found_terminating_end ) ) {
        $is_pascal = 1;
    }
    return $is_pascal;
} # 1}}}
sub really_is_incpascal {                    # {{{1
    # Given filename, returns TRUE if its contents really are Pascal.
    # For .inc files (mainly seen in fpc).

    my $filename = shift;
    chomp($filename);

    # The heuristic is as follows: it is Pascal if any of the following:
    # 1. really_is_pascal returns true
    # 2. Any usual reserved word is found (program, unit, const, begin...)

    # If the general heuristic for Pascal files succeeds, we are done.
    if (really_is_pascal($filename)) {
        return 1;
    }

    my $is_pascal = 0;      # Value to determine.
    my $found_begin = 0;

    my $PASCAL_FILE = open_file('<', $filename, 0);
    die "Can't open $filename to determine if it's Pascal.\n"
        if !defined $PASCAL_FILE;
    while (<$PASCAL_FILE>) {
        s/\{.*?\}//g;       # Ignore {...} comments on this line; imperfect, but effective.
        s/\(\*.*?\*\)//g;   # Ignore (*...*) comments on this line; imperfect, but effective.
        if (m/\bprogram\s+[A-Za-z]/i)      {$is_pascal = 1;}
        if (m/\bunit\s+[A-Za-z]/i)         {$is_pascal = 1;}
        if (m/\bmodule\s+[A-Za-z]/i)       {$is_pascal = 1;}
        if (m/\bprocedure\b/i)             {$is_pascal = 1;}
        if (m/\bfunction\b/i)              {$is_pascal = 1;}
        if (m/^\s*interface\s+/i)          {$is_pascal = 1;}
        if (m/^\s*implementation\s+/i)     {$is_pascal = 1;}
        if (m/\bconstant\s+/i)             {$is_pascal = 1;}
        if (m/\bbegin\b/i)                 {$found_begin = 1;}
        if ((m/end\.\s*$/i) && $found_begin) {$is_pascal = 1;}
        if ($is_pascal) {
            last;
        }
    }
    close($PASCAL_FILE);
    return $is_pascal;
} # 1}}}
sub really_is_php {                          # {{{1
    # Given filename, returns TRUE if its contents really are PHP.

    my $filename = shift;
    chomp($filename);

    my $is_php = 0;      # Value to determine.
    # Need to find a matching pair of surrounds, with ending after beginning:
    my $normal_surround = 0; #
    my $script_surround = 0; # ; bit 0 =