mc/vfs/extfs/patchfs.in

#! @PERL@ -w
#
# Written by Adam Byrtek <alpha@debian.org>, 2002
#
# Extfs to handle patches in context and unified diff format.
# Known issues: when the name of the file to patch is modified during editing,
# the hunk is duplicated on copyin. This is unavoidable.
use bytes;
use strict;
use POSIX;
use File::Temp 'tempfile';
# standard binaries
my $bzip = 'bzip2';
my $gzip = 'gzip';
my $fileutil = 'file';
# date parsing requires Date::Parse from the TimeDate module
my $parsedates = eval 'require Date::Parse';
# regular expressions
my $unified_header=qr/^--- .*\n\+\+\+ .*\n@@ .* @@.*\n$/;
my $unified_extract=qr/^--- ([^\s]+).*\n\+\+\+ ([^\s]+)\s*([^\t\n]*)/;
my $unified_contents=qr/^([+\-\\ \n]|@@ .* @@)/;
my $context_header=qr/^\*\*\* .*\n--- .*\n\*{15}\n$/;
my $context_extract=qr/^\*\*\* ([^\s]+).*\n--- ([^\s]+)\s*([^\t\n]*)/;
my $context_contents=qr/^([!+\-\\ \n]|-{3} .* -{4}|\*{3} .* \*{4}|\*{15})/;
my $ls_extract_id=qr/^[^\s]+\s+[^\s]+\s+([^\s]+)\s+([^\s]+)/;
my $basename=qr|^(.*/)*([^/]+)$|;
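# illustrative examples of the headers the patterns above are meant to match:
#   unified:  --- orig/file.c<TAB>2002-01-01 12:00:00
#             +++ new/file.c<TAB>2002-01-02 12:00:00
#             @@ -1,4 +1,6 @@
#   context:  *** orig/file.c<TAB>2002-01-01 12:00:00
#             --- new/file.c<TAB>2002-01-02 12:00:00
#             ***************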
sub patchfs_canonicalize_path ($) {
my ($fname) = @_;
$fname =~ s,/+,/,g;
$fname =~ s,(^|/)(?:\.?\./)+,$1,;
return $fname;
}
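# e.g. patchfs_canonicalize_path('..//./dir/file.c') returns 'dir/file.c' (illustrative)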
# output a unix timestamp in an mc-readable format
sub timef
{
my @time=localtime($_[0]);
return sprintf '%02d-%02d-%02d %02d:%02d', $time[4]+1, $time[3],
$time[5]+1900, $time[2], $time[1];
}
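# e.g. timef(0) formats the Unix epoch as month-day-year in local time;
# in UTC that would be '01-01-1970 00:00' (illustrative)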
# parse the given string as a date and return it in mc-readable format
sub datetime
{
# in case of problems fall back to the current time
# note: str2time interprets some invalid values (e.g. " ") as 'today'
if ($parsedates && defined (my $t=str2time($_[0]))) {
return timef($t);
}
return timef(time);
}
# print message on stderr and exit
sub error
{
print STDERR $_[0], "\n";
exit 1;
}
# (compressed) input
sub myin
{
my ($qfname)=(quotemeta $_[0]);
$_=`$fileutil $qfname`;
if (/bzip/) {
return "$bzip -dc $qfname";
} elsif (/gzip/) {
return "$gzip -dc $qfname";
} else {
return "cat $qfname";
}
}
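# e.g. for a gzip-compressed patch this typically returns a command like
# "gzip -dc <quoted filename>" (illustrative)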
# (compressed) output
sub myout
{
my ($qfname,$append)=(quotemeta $_[0],$_[1]);
my ($sep) = $append ? '>>' : '>';
$_=`$fileutil $qfname`;
if (/bzip/) {
return "$bzip -c $sep $qfname";
} elsif (/gzip/) {
return "$gzip -c $sep $qfname";
} else {
return "cat $sep $qfname";
}
}
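# e.g. appending to a bzip2-compressed archive typically yields a command like
# "bzip2 -c >> <quoted filename>" (illustrative)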
# select the diff file name conforming to the rules found in diff.info
sub diff_filename
{
my ($fsrc,$fdst)= @_;
$fsrc = patchfs_canonicalize_path ($fsrc);
$fdst = patchfs_canonicalize_path ($fdst);
if (!$fdst && !$fsrc) {
error 'Index: not yet implemented';
} elsif (($fdst eq '/dev/null') && ($fsrc eq '/dev/null')) {
# check this first, otherwise the two branches below would swallow it
error 'Malformed diff';
} elsif (!$fsrc || $fsrc eq '/dev/null') {
return ($fdst,'PATCH-CREATE/');
} elsif (!$fdst || $fdst eq '/dev/null') {
return ($fsrc,'PATCH-REMOVE/');
} else {
# fewest path name components wins; s|/|/|g just counts the slashes
if ($fdst=~s|/|/|g < $fsrc=~s|/|/|g) {
return ($fdst,'');
} elsif ($fdst=~s|/|/|g > $fsrc=~s|/|/|g) {
return ($fsrc,'');
} else {
# shorter base name wins; the $basename match puts the base name in $2
if (($fdst=~/$basename/,length $2) < ($fsrc=~/$basename/,length $2)) {
return ($fdst,'');
} elsif (($fdst=~/$basename/,length $2) > ($fsrc=~/$basename/,length $2)) {
return ($fsrc,'');
} else {
# shortest names
if (length $fdst < length $fsrc) {
return ($fdst,'');
} else {
return ($fsrc,'');
}
}
}
}
}
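# e.g. given '--- foo/bar.c.orig' and '+++ foo/bar.c', both names have the same
# depth, so the shorter base name wins and 'foo/bar.c' is used (illustrative)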
# parse unified or context header
sub parse_header
{
my ($unified,$context,$buf)=@_;
if ($unified) {
error "Can't parse unified diff header"
unless ((($$buf.=<I>).=<I>)=~/$unified_header/);
return $$buf=~/$unified_extract/;
} elsif ($context) {
error "Can't parse context diff header"
unless ((($$buf.=<I>).=<I>)=~/$context_header/);
return $$buf=~/$context_extract/;
}
}
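# parse_header reads the next two input lines to complete the header already
# held in $$buf, then returns the captured (source file, destination file,
# timestamp) triple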
# list files affected by patch
sub list
{
my ($archive)=(quotemeta $_[0]);
my ($state,$pos,$len,$time);
my ($f,$fsrc,$fdst,$prefix);
my ($unified,$context)=(0,0);
# use uid and gid from file
my ($uid,$gid)=(`ls -l $archive`=~/$ls_extract_id/);
import Date::Parse if ($parsedates);
# state==1 means diff contents, state==0 means comments
$state=0; $len=0; $f='';
while (<I>) {
# recognize diff type
if (!$unified && !$context) {
$unified=1 if (/^--- /);
$context=1 if (/^\*\*\* /);
if (!$unified && !$context) {
$len+=length;
next;
}
}
if (($unified && /^--- /) || ($context && /^\*\*\* [^\*]*$/)) {
# start of new file
if ($state==1) {
printf "-rw-r--r-- 1 %s %s %d %s %s%s\n", $uid, $gid, $len, datetime($time), $prefix, $f
if $f;
$len=0;
}
$state=1;
($fsrc,$fdst,$time)=parse_header($unified,$context,\$_);
($f,$prefix)=diff_filename($fsrc,$fdst);
$f=$f.".diff";
} elsif ($state==1 && (($unified && !/$unified_contents/) || ($context && !/$context_contents/))) {
# start of comments, end of diff contents
printf "-rw-r--r-- 1 %s %s %d %s %s%s\n", $uid, $gid, $len, datetime($time), $prefix, $f
if $f;
$state=$len=0;
}
$len+=length;
}
printf "-rw-r--r-- 1 %s %s %d %s %s%s\n", $uid, $gid, $len, datetime($time), $prefix, $f
if ($f && $state==1);
}
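# each listing line looks like, e.g.:
# -rw-r--r-- 1 <uid> <gid> <size> 01-02-2002 12:00 PATCH-CREATE/newfile.c.diff
# (illustrative; owner and group are copied from the patch file itself)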
# extract diff from patch
sub copyout
{
my ($file,$out)=@_;
my ($fsrc,$fdst,$found,$state,$buf);
my ($unified,$context)=(0,0);
$file=~s/^(PATCH-(CREATE|REMOVE)\/)?(.*)\.diff$/$3/;
$file = patchfs_canonicalize_path ($file);
# state==1 means diff contents, state==0 means comments
$state=0; $found=0; $buf='';
while (<I>) {
# recognize diff type
if (!$unified && !$context) {
$unified=1 if (/^--- /);
$context=1 if (/^\*\*\* /);
if (!$unified && !$context) {
$buf.=$_;
next;
}
}
if (($unified && /^--- /) || ($context && /^\*\*\* [^\*]*$/)) {
last if ($state==1 && $found);
$state=1;
($fsrc,$fdst,)=parse_header($unified,$context,\$_);
$fsrc = patchfs_canonicalize_path ($fsrc);
$fdst = patchfs_canonicalize_path ($fdst);
$found=1 if (($fsrc eq $file) || ($fdst eq $file));
} elsif ($state==1 && (($unified && !/$unified_contents/) || ($context && !/$context_contents/))) {
# start of comments, end of diff contents
last if ($found);
$state=0;
$buf='';
}
$buf.=$_ if ($found || $state==0)
}
if ($found) {
open O, '>', $out or error "Can't write to $out";
print O $buf;
close O;
}
}
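# note: the free-text comment block preceding the matching diff is kept in
# $buf, so the extracted .diff file still carries it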
# remove diff(s) from patch
sub rm
{
my ($archive)=(shift);
my ($fsrc,$fdst,$found,$state,$buf);
my ($tmp,$tmpname)=tempfile();
my ($unified,$context)=(0,0);
@_=map {scalar(s/^(PATCH-(CREATE|REMOVE)\/)?(.*)\.diff$/$3/,$_)} @_;
# state==1 means diff contents, state==0 means comments
$state=0; $found=0; $buf='';
while (<I>) {
# recognize diff type
if (!$unified && !$context) {
$unified=1 if (/^--- /);
$context=1 if (/^\*\*\* /);
if (!$unified && !$context) {
$buf.=$_;
next;
}
}
if (($unified && /^--- /) || ($context && /^\*\*\* [^\*]*$/)) {
$state=1;
($fsrc,$fdst,)=parse_header($unified,$context,\$_);
# remove listed files
foreach (@_) {
if (($fsrc eq $_) || ($fdst eq $_)) {
$found=1;
last;
}
}
if (!$found) {
print $tmp $buf;
$buf='';
}
} elsif ($state==1 && (($unified && !/$unified_contents/) || ($context && !/$context_contents/))) {
# start of comments, end of diff contents
$found=0;
$state=0;
$buf='';
}
if ($state==0) {
$buf.=$_;
} elsif (!$found) {
print $tmp $_;
}
}
print $tmp $buf if (!$found);
close $tmp;
close I;
# replace archive with temporary file
system('cat '.quotemeta($tmpname).'|'.myout($archive,0))==0
or error "Can't write to archive";
system 'rm -f '.quotemeta($tmpname);
}
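# note: the comment block preceding a removed diff is dropped together with
# the diff itself ($buf is cleared without being written out)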
# append diff to archive
sub copyin
{
my ($archive,$name,$src)=(@_);
my ($fsrc,$fdst,$f,@files);
my ($unified,$context)=(0,0);
# build filelist
open I, myin($src).'|';
while (<I>) {
# recognize diff type
if (!$unified && !$context) {
$unified=1 if (/^--- /);
$context=1 if (/^\*\*\* /);
}
if (($unified && /^--- /) || ($context && /^\*\*\* [^\*]*$/)) {
($fsrc,$fdst,)=parse_header($unified,$context,\$_);
($f,)=diff_filename($fsrc,$fdst);
push(@files,$f);
}
}
close I;
# remove files that would be overwritten
open I, myin($archive).'|';
rm ($archive, map($_.'.diff',@files));
close I;
my $cmd1=myin($src);
my $cmd2=myout($archive,1);
system("$cmd1 | $cmd2")==0
or error "Can't write to archive";
}
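# command dispatch: mc invokes this extfs helper as e.g. 'patchfs list <archive>'
# or 'patchfs copyout <archive> <stored name> <local file>' (illustrative invocations)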
if ($ARGV[0] eq 'list') {
open I, myin($ARGV[1]).'|';
list $ARGV[1];
exit 0;
} elsif ($ARGV[0] eq 'copyout') {
open I, myin($ARGV[1])."|";
copyout ($ARGV[2], $ARGV[3]);
exit 0;
} elsif ($ARGV[0] eq 'rm') {
open I, myin($ARGV[1])."|";
rm ($ARGV[1], $ARGV[2]);
exit 0;
} elsif ($ARGV[0] eq 'rmdir') {
exit 0;
} elsif ($ARGV[0] eq 'mkdir') {
exit 0;
} elsif ($ARGV[0] eq 'copyin') {
copyin ($ARGV[1], $ARGV[2], $ARGV[3]);
exit 0;
}
exit 1;