
archive/tar: partially revert sparse file support

This CL removes the following APIs:
	type SparseEntry struct{ ... }
	type Header struct{ SparseHoles []SparseEntry; ... }
	func (*Header) DetectSparseHoles(f *os.File) error
	func (*Header) PunchSparseHoles(f *os.File) error
	func (*Reader) WriteTo(io.Writer) (int64, error)
	func (*Writer) ReadFrom(io.Reader) (int64, error)

These APIs were added during the Go1.10 dev cycle and are safe to remove.

The rationale for reverting is that Header.DetectSparseHoles and
Header.PunchSparseHoles provide functionality that probably belongs in
the os package itself.

The other APIs, such as Header.SparseHoles, Reader.WriteTo, and Writer.ReadFrom,
perform no OS-specific logic and only implement the business logic of
reading and writing sparse archives. Since we do not know what the API added to
package os may look like, we preemptively revert these non-OS-specific changes
as well by simply commenting them out.

Updates #13548
Updates #22735

Change-Id: I77842acd39a43de63e5c754bfa1c26cc24687b70
Reviewed-on: https://go-review.googlesource.com/78030
Reviewed-by: Russ Cox <rsc@golang.org>
Joe Tsai 2017-11-15 11:27:10 -08:00 committed by Joe Tsai
parent ca886e0673
commit ba2835db6c
10 changed files with 320 additions and 985 deletions
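
With Reader.WriteTo and Writer.ReadFrom unexported by this change, extracting a
sparse entry falls back to a plain io.Copy, which materializes each hole as a run
of NUL bytes. A minimal standalone sketch (assuming a local sparse.tar archive;
illustration only, not code from this CL):

package main

import (
	"archive/tar"
	"io"
	"log"
	"os"
)

func main() {
	// Hypothetical input path for illustration.
	f, err := os.Open("sparse.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	hdr, err := tr.Next()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("extracting %s (%d bytes)", hdr.Name, hdr.Size)

	// Hypothetical output path for illustration.
	dst, err := os.Create("sparse.db")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Holes in a GNU or PAX sparse entry are read back as NUL bytes,
	// so io.Copy writes the full hdr.Size bytes to dst.
	if _, err := io.Copy(dst, tr); err != nil {
		log.Fatal(err)
	}
}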

View File

@ -13,7 +13,6 @@ package tar
import (
"errors"
"fmt"
"io"
"math"
"os"
"path"
@ -82,7 +81,6 @@ const (
TypeXGlobalHeader = 'g'
// Type 'S' indicates a sparse file in the GNU format.
// Header.SparseHoles should be populated when using this type.
TypeGNUSparse = 'S'
// Types 'L' and 'K' are used by the GNU format for a meta file
@ -164,19 +162,6 @@ type Header struct {
Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
// SparseHoles represents a sequence of holes in a sparse file.
//
// A file is sparse if len(SparseHoles) > 0 or Typeflag is TypeGNUSparse.
// If TypeGNUSparse is set, then the format is GNU, otherwise
// the format is PAX (by using GNU-specific PAX records).
//
// A sparse file consists of fragments of data, intermixed with holes
// (described by this field). A hole is semantically a block of NUL-bytes,
// but does not actually exist within the tar file.
// The holes must be sorted in ascending order,
// not overlap with each other, and not extend past the specified Size.
SparseHoles []SparseEntry
// Xattrs stores extended attributes as PAX records under the
// "SCHILY.xattr." namespace.
//
@ -214,10 +199,10 @@ type Header struct {
Format Format
}
// SparseEntry represents a Length-sized fragment at Offset in the file.
type SparseEntry struct{ Offset, Length int64 }
// sparseEntry represents a Length-sized fragment at Offset in the file.
type sparseEntry struct{ Offset, Length int64 }
func (s SparseEntry) endOffset() int64 { return s.Offset + s.Length }
func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
// A sparse file can be represented as either a sparseDatas or a sparseHoles.
// As long as the total size is known, they are equivalent and one can be
@ -240,7 +225,7 @@ func (s SparseEntry) endOffset() int64 { return s.Offset + s.Length }
// {Offset: 2, Length: 5}, // Data fragment for 2..6
// {Offset: 18, Length: 3}, // Data fragment for 18..20
// }
// var sph sparseHoles = []SparseEntry{
// var sph sparseHoles = []sparseEntry{
// {Offset: 0, Length: 2}, // Hole fragment for 0..1
// {Offset: 7, Length: 11}, // Hole fragment for 7..17
// {Offset: 21, Length: 4}, // Hole fragment for 21..24
@ -249,19 +234,19 @@ func (s SparseEntry) endOffset() int64 { return s.Offset + s.Length }
// Then the content of the resulting sparse file with a Header.Size of 25 is:
// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type (
sparseDatas []SparseEntry
sparseHoles []SparseEntry
sparseDatas []sparseEntry
sparseHoles []sparseEntry
)
// validateSparseEntries reports whether sp is a valid sparse map.
// It does not matter whether sp represents data fragments or hole fragments.
func validateSparseEntries(sp []SparseEntry, size int64) bool {
func validateSparseEntries(sp []sparseEntry, size int64) bool {
// Validate all sparse entries. These are the same checks as performed by
// the BSD tar utility.
if size < 0 {
return false
}
var pre SparseEntry
var pre sparseEntry
for _, cur := range sp {
switch {
case cur.Offset < 0 || cur.Length < 0:
@ -285,7 +270,7 @@ func validateSparseEntries(sp []SparseEntry, size int64) bool {
// Even though the Go tar Reader and the BSD tar utility can handle entries
// with arbitrary offsets and lengths, the GNU tar utility can only handle
// offsets and lengths that are multiples of blockSize.
func alignSparseEntries(src []SparseEntry, size int64) []SparseEntry {
func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
dst := src[:0]
for _, s := range src {
pos, end := s.Offset, s.endOffset()
@ -294,7 +279,7 @@ func alignSparseEntries(src []SparseEntry, size int64) []SparseEntry {
end -= blockPadding(-end) // Round-down to nearest blockSize
}
if pos < end {
dst = append(dst, SparseEntry{Offset: pos, Length: end - pos})
dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
}
}
return dst
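
As a worked illustration of the alignment rule documented above (a standalone
sketch, not code from this CL): offsets round up and ends round down to multiples
of blockSize, unless the fragment already ends at the file size, so the fragment
{1000, 4000} in a 5000-byte file rounds to {1024, 3976}, matching the test vector
later in this CL.

package main

import "fmt"

const blockSize = 512 // size of a tar block

// align applies the rounding rule to a single fragment.
func align(offset, length, size int64) (int64, int64) {
	pos, end := offset, offset+length
	pos += (blockSize - pos%blockSize) % blockSize // round offset up
	if end != size {
		end -= end % blockSize // round end down
	}
	if pos >= end {
		return 0, 0 // fragment disappears after alignment
	}
	return pos, end - pos
}

func main() {
	fmt.Println(align(1000, 4000, 5000)) // prints: 1024 3976
}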
@ -308,9 +293,9 @@ func alignSparseEntries(src []SparseEntry, size int64) []SparseEntry {
// * adjacent fragments are coalesced together
// * only the last fragment may be empty
// * the endOffset of the last fragment is the total size
func invertSparseEntries(src []SparseEntry, size int64) []SparseEntry {
func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
dst := src[:0]
var pre SparseEntry
var pre sparseEntry
for _, cur := range src {
if cur.Length == 0 {
continue // Skip empty fragments
@ -491,6 +476,9 @@ func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err
}
}
// TODO(dsnet): Re-enable this when adding sparse support.
// See https://golang.org/issue/22735
/*
// Check sparse files.
if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
if isHeaderOnlyType(h.Typeflag) {
@ -509,6 +497,7 @@ func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err
whyNoUSTAR = "USTAR does not support sparse files"
format.mustNotBe(FormatUSTAR)
}
*/
// Check desired format.
if wantFormat := h.Format; wantFormat != FormatUnknown {
@ -532,66 +521,6 @@ func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err
return format, paxHdrs, err
}
var sysSparseDetect func(f *os.File) (sparseHoles, error)
var sysSparsePunch func(f *os.File, sph sparseHoles) error
// DetectSparseHoles searches for holes within f to populate SparseHoles
// on supported operating systems and filesystems.
// The file offset is cleared to zero.
//
// When packing a sparse file, DetectSparseHoles should be called prior to
// serializing the header to the archive with Writer.WriteHeader.
func (h *Header) DetectSparseHoles(f *os.File) (err error) {
defer func() {
if _, serr := f.Seek(0, io.SeekStart); err == nil {
err = serr
}
}()
h.SparseHoles = nil
if sysSparseDetect != nil {
sph, err := sysSparseDetect(f)
h.SparseHoles = sph
return err
}
return nil
}
// PunchSparseHoles destroys the contents of f, and prepares a sparse file
// (on supported operating systems and filesystems)
// with holes punched according to SparseHoles.
// The file offset is cleared to zero.
//
// When extracting a sparse file, PunchSparseHoles should be called prior to
// populating the content of a file with Reader.WriteTo.
func (h *Header) PunchSparseHoles(f *os.File) (err error) {
defer func() {
if _, serr := f.Seek(0, io.SeekStart); err == nil {
err = serr
}
}()
if err := f.Truncate(0); err != nil {
return err
}
var size int64
if len(h.SparseHoles) > 0 {
size = h.SparseHoles[len(h.SparseHoles)-1].endOffset()
}
if !validateSparseEntries(h.SparseHoles, size) {
return errors.New("archive/tar: invalid sparse holes")
}
if size == 0 {
return nil // For non-sparse files, do nothing (other than Truncate)
}
if sysSparsePunch != nil {
return sysSparsePunch(f, h.SparseHoles)
}
return f.Truncate(size)
}
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
@ -693,9 +622,6 @@ const (
// Since os.FileInfo's Name method only returns the base name of
// the file it describes, it may be necessary to modify Header.Name
// to provide the full path name of the file.
//
// This function does not populate Header.SparseHoles;
// for sparse file support, additionally call Header.DetectSparseHoles.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("archive/tar: FileInfo is nil")
@ -761,9 +687,6 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
h.Size = 0
h.Linkname = sys.Linkname
}
if sys.SparseHoles != nil {
h.SparseHoles = append([]SparseEntry{}, sys.SparseHoles...)
}
if sys.PAXRecords != nil {
h.PAXRecords = make(map[string]string)
for k, v := range sys.PAXRecords {

View File

@ -7,13 +7,10 @@ package tar_test
import (
"archive/tar"
"bytes"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
)
func Example_minimal() {
@ -72,179 +69,3 @@ func Example_minimal() {
// Contents of todo.txt:
// Get animal handling license.
}
// A sparse file can efficiently represent a large file that is mostly empty.
// When packing an archive, Header.DetectSparseHoles can be used to populate
// the sparse map, while Header.PunchSparseHoles can be used to create a
// sparse file on disk when extracting an archive.
func Example_sparseAutomatic() {
// Create the source sparse file.
src, err := ioutil.TempFile("", "sparse.db")
if err != nil {
log.Fatal(err)
}
defer os.Remove(src.Name()) // Best-effort cleanup
defer func() {
if err := src.Close(); err != nil {
log.Fatal(err)
}
}()
if err := src.Truncate(10e6); err != nil {
log.Fatal(err)
}
for i := 0; i < 10; i++ {
if _, err := src.Seek(1e6-1e3, io.SeekCurrent); err != nil {
log.Fatal(err)
}
if _, err := src.Write(bytes.Repeat([]byte{'0' + byte(i)}, 1e3)); err != nil {
log.Fatal(err)
}
}
// Create an archive and pack the source sparse file to it.
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
fi, err := src.Stat()
if err != nil {
log.Fatal(err)
}
hdr, err := tar.FileInfoHeader(fi, "")
if err != nil {
log.Fatal(err)
}
if err := hdr.DetectSparseHoles(src); err != nil {
log.Fatal(err)
}
if err := tw.WriteHeader(hdr); err != nil {
log.Fatal(err)
}
if _, err := io.Copy(tw, src); err != nil {
log.Fatal(err)
}
if err := tw.Close(); err != nil {
log.Fatal(err)
}
// Create the destination sparse file.
dst, err := ioutil.TempFile("", "sparse.db")
if err != nil {
log.Fatal(err)
}
defer os.Remove(dst.Name()) // Best-effort cleanup
defer func() {
if err := dst.Close(); err != nil {
log.Fatal(err)
}
}()
// Open the archive and extract the sparse file into the destination file.
tr := tar.NewReader(&buf)
hdr, err = tr.Next()
if err != nil {
log.Fatal(err)
}
if err := hdr.PunchSparseHoles(dst); err != nil {
log.Fatal(err)
}
if _, err := io.Copy(dst, tr); err != nil {
log.Fatal(err)
}
// Verify that the sparse files are identical.
want, err := ioutil.ReadFile(src.Name())
if err != nil {
log.Fatal(err)
}
got, err := ioutil.ReadFile(dst.Name())
if err != nil {
log.Fatal(err)
}
fmt.Printf("Src MD5: %08x\n", md5.Sum(want))
fmt.Printf("Dst MD5: %08x\n", md5.Sum(got))
// Output:
// Src MD5: 33820d648d42cb3da2515da229149f74
// Dst MD5: 33820d648d42cb3da2515da229149f74
}
// The SparseHoles can be manually constructed without Header.DetectSparseHoles.
func Example_sparseManual() {
// Define a sparse file to add to the archive.
// This sparse files contains 5 data fragments, and 4 hole fragments.
// The logical size of the file is 16 KiB, while the physical size of the
// file is only 3 KiB (not counting the header data).
hdr := &tar.Header{
Name: "sparse.db",
Size: 16384,
SparseHoles: []tar.SparseEntry{
// Data fragment at 0..1023
{Offset: 1024, Length: 1024 - 512}, // Hole fragment at 1024..1535
// Data fragment at 1536..2047
{Offset: 2048, Length: 2048 - 512}, // Hole fragment at 2048..3583
// Data fragment at 3584..4095
{Offset: 4096, Length: 4096 - 512}, // Hole fragment at 4096..7679
// Data fragment at 7680..8191
{Offset: 8192, Length: 8192 - 512}, // Hole fragment at 8192..15871
// Data fragment at 15872..16383
},
}
// The regions marked as a sparse hole are filled with NUL-bytes.
// The total length of the body content must match the specified Size field.
body := "" +
strings.Repeat("A", 1024) +
strings.Repeat("\x00", 1024-512) +
strings.Repeat("B", 512) +
strings.Repeat("\x00", 2048-512) +
strings.Repeat("C", 512) +
strings.Repeat("\x00", 4096-512) +
strings.Repeat("D", 512) +
strings.Repeat("\x00", 8192-512) +
strings.Repeat("E", 512)
h := md5.Sum([]byte(body))
fmt.Printf("Write content of %s, Size: %d, MD5: %08x\n", hdr.Name, len(body), h)
fmt.Printf("Write SparseHoles of %s:\n\t%v\n\n", hdr.Name, hdr.SparseHoles)
// Create a new archive and write the sparse file.
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
if err := tw.WriteHeader(hdr); err != nil {
log.Fatal(err)
}
if _, err := tw.Write([]byte(body)); err != nil {
log.Fatal(err)
}
if err := tw.Close(); err != nil {
log.Fatal(err)
}
// Open and iterate through the files in the archive.
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
log.Fatal(err)
}
body, err := ioutil.ReadAll(tr)
if err != nil {
log.Fatal(err)
}
h := md5.Sum([]byte(body))
fmt.Printf("Read content of %s, Size: %d, MD5: %08x\n", hdr.Name, len(body), h)
fmt.Printf("Read SparseHoles of %s:\n\t%v\n\n", hdr.Name, hdr.SparseHoles)
}
// Output:
// Write content of sparse.db, Size: 16384, MD5: 9b4e2cfae0f9303d30237718e891e9f9
// Write SparseHoles of sparse.db:
// [{1024 512} {2048 1536} {4096 3584} {8192 7680}]
//
// Read content of sparse.db, Size: 16384, MD5: 9b4e2cfae0f9303d30237718e891e9f9
// Read SparseHoles of sparse.db:
// [{1024 512} {2048 1536} {4096 3584} {8192 7680} {16384 0}]
}

View File

@ -41,6 +41,8 @@ import "strings"
// The table's lower portion shows specialized features of each format,
// such as supported string encodings, support for sub-second timestamps,
// or support for sparse files.
//
// The Writer currently provides no support for sparse files.
type Format int
// Constants to identify various tar formats.

View File

@ -192,7 +192,6 @@ func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
}
sph := invertSparseEntries(spd, hdr.Size)
tr.curr = &sparseFileReader{tr.curr, sph, 0}
hdr.SparseHoles = append([]SparseEntry{}, sph...)
}
return err
}
@ -486,7 +485,7 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, err
if p.err != nil {
return nil, p.err
}
spd = append(spd, SparseEntry{Offset: offset, Length: length})
spd = append(spd, sparseEntry{Offset: offset, Length: length})
}
if s.IsExtended()[0] > 0 {
@ -566,7 +565,7 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
if err1 != nil || err2 != nil {
return nil, ErrHeader
}
spd = append(spd, SparseEntry{Offset: offset, Length: length})
spd = append(spd, sparseEntry{Offset: offset, Length: length})
}
return spd, nil
}
@ -600,7 +599,7 @@ func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
if err1 != nil || err2 != nil {
return nil, ErrHeader
}
spd = append(spd, SparseEntry{Offset: offset, Length: length})
spd = append(spd, sparseEntry{Offset: offset, Length: length})
sparseMap = sparseMap[2:]
}
return spd, nil
@ -627,14 +626,17 @@ func (tr *Reader) Read(b []byte) (int, error) {
return n, err
}
// WriteTo writes the content of the current file to w.
// writeTo writes the content of the current file to w.
// The bytes written matches the number of remaining bytes in the current file.
//
// If the current file is sparse and w is an io.WriteSeeker,
// then WriteTo uses Seek to skip past holes defined in Header.SparseHoles,
// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are filled with NULs.
// This always writes the last byte to ensure w is the right size.
func (tr *Reader) WriteTo(w io.Writer) (int64, error) {
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tr *Reader) writeTo(w io.Writer) (int64, error) {
if tr.err != nil {
return 0, tr.err
}
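
The hole-skipping idea described in the writeTo comment can be shown in
isolation. A minimal sketch (illustration only, with a hypothetical output path):
a hole is skipped with Seek rather than written, and the final byte is written
explicitly so the file ends up with the right logical size.

package main

import (
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Create("sketch.db") // hypothetical output path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Data fragment at the front of the file.
	if _, err := f.Write([]byte("abcde")); err != nil {
		log.Fatal(err)
	}
	// A 1 MiB hole: skip it with Seek instead of writing zeros.
	if _, err := f.Seek(1<<20, io.SeekCurrent); err != nil {
		log.Fatal(err)
	}
	// Write the last byte so the file is really 5 + 1 MiB + 1 bytes long.
	if _, err := f.Write([]byte{0}); err != nil {
		log.Fatal(err)
	}
}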

View File

@ -71,23 +71,6 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
SparseHoles: []SparseEntry{
{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}, {12, 1}, {14, 1},
{16, 1}, {18, 1}, {20, 1}, {22, 1}, {24, 1}, {26, 1}, {28, 1},
{30, 1}, {32, 1}, {34, 1}, {36, 1}, {38, 1}, {40, 1}, {42, 1},
{44, 1}, {46, 1}, {48, 1}, {50, 1}, {52, 1}, {54, 1}, {56, 1},
{58, 1}, {60, 1}, {62, 1}, {64, 1}, {66, 1}, {68, 1}, {70, 1},
{72, 1}, {74, 1}, {76, 1}, {78, 1}, {80, 1}, {82, 1}, {84, 1},
{86, 1}, {88, 1}, {90, 1}, {92, 1}, {94, 1}, {96, 1}, {98, 1},
{100, 1}, {102, 1}, {104, 1}, {106, 1}, {108, 1}, {110, 1},
{112, 1}, {114, 1}, {116, 1}, {118, 1}, {120, 1}, {122, 1},
{124, 1}, {126, 1}, {128, 1}, {130, 1}, {132, 1}, {134, 1},
{136, 1}, {138, 1}, {140, 1}, {142, 1}, {144, 1}, {146, 1},
{148, 1}, {150, 1}, {152, 1}, {154, 1}, {156, 1}, {158, 1},
{160, 1}, {162, 1}, {164, 1}, {166, 1}, {168, 1}, {170, 1},
{172, 1}, {174, 1}, {176, 1}, {178, 1}, {180, 1}, {182, 1},
{184, 1}, {186, 1}, {188, 1}, {190, 10},
},
Format: FormatGNU,
}, {
Name: "sparse-posix-0.0",
@ -102,23 +85,6 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
SparseHoles: []SparseEntry{
{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}, {12, 1}, {14, 1},
{16, 1}, {18, 1}, {20, 1}, {22, 1}, {24, 1}, {26, 1}, {28, 1},
{30, 1}, {32, 1}, {34, 1}, {36, 1}, {38, 1}, {40, 1}, {42, 1},
{44, 1}, {46, 1}, {48, 1}, {50, 1}, {52, 1}, {54, 1}, {56, 1},
{58, 1}, {60, 1}, {62, 1}, {64, 1}, {66, 1}, {68, 1}, {70, 1},
{72, 1}, {74, 1}, {76, 1}, {78, 1}, {80, 1}, {82, 1}, {84, 1},
{86, 1}, {88, 1}, {90, 1}, {92, 1}, {94, 1}, {96, 1}, {98, 1},
{100, 1}, {102, 1}, {104, 1}, {106, 1}, {108, 1}, {110, 1},
{112, 1}, {114, 1}, {116, 1}, {118, 1}, {120, 1}, {122, 1},
{124, 1}, {126, 1}, {128, 1}, {130, 1}, {132, 1}, {134, 1},
{136, 1}, {138, 1}, {140, 1}, {142, 1}, {144, 1}, {146, 1},
{148, 1}, {150, 1}, {152, 1}, {154, 1}, {156, 1}, {158, 1},
{160, 1}, {162, 1}, {164, 1}, {166, 1}, {168, 1}, {170, 1},
{172, 1}, {174, 1}, {176, 1}, {178, 1}, {180, 1}, {182, 1},
{184, 1}, {186, 1}, {188, 1}, {190, 10},
},
PAXRecords: map[string]string{
"GNU.sparse.size": "200",
"GNU.sparse.numblocks": "95",
@ -138,23 +104,6 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
SparseHoles: []SparseEntry{
{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}, {12, 1}, {14, 1},
{16, 1}, {18, 1}, {20, 1}, {22, 1}, {24, 1}, {26, 1}, {28, 1},
{30, 1}, {32, 1}, {34, 1}, {36, 1}, {38, 1}, {40, 1}, {42, 1},
{44, 1}, {46, 1}, {48, 1}, {50, 1}, {52, 1}, {54, 1}, {56, 1},
{58, 1}, {60, 1}, {62, 1}, {64, 1}, {66, 1}, {68, 1}, {70, 1},
{72, 1}, {74, 1}, {76, 1}, {78, 1}, {80, 1}, {82, 1}, {84, 1},
{86, 1}, {88, 1}, {90, 1}, {92, 1}, {94, 1}, {96, 1}, {98, 1},
{100, 1}, {102, 1}, {104, 1}, {106, 1}, {108, 1}, {110, 1},
{112, 1}, {114, 1}, {116, 1}, {118, 1}, {120, 1}, {122, 1},
{124, 1}, {126, 1}, {128, 1}, {130, 1}, {132, 1}, {134, 1},
{136, 1}, {138, 1}, {140, 1}, {142, 1}, {144, 1}, {146, 1},
{148, 1}, {150, 1}, {152, 1}, {154, 1}, {156, 1}, {158, 1},
{160, 1}, {162, 1}, {164, 1}, {166, 1}, {168, 1}, {170, 1},
{172, 1}, {174, 1}, {176, 1}, {178, 1}, {180, 1}, {182, 1},
{184, 1}, {186, 1}, {188, 1}, {190, 10},
},
PAXRecords: map[string]string{
"GNU.sparse.size": "200",
"GNU.sparse.numblocks": "95",
@ -175,23 +124,6 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
SparseHoles: []SparseEntry{
{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}, {12, 1}, {14, 1},
{16, 1}, {18, 1}, {20, 1}, {22, 1}, {24, 1}, {26, 1}, {28, 1},
{30, 1}, {32, 1}, {34, 1}, {36, 1}, {38, 1}, {40, 1}, {42, 1},
{44, 1}, {46, 1}, {48, 1}, {50, 1}, {52, 1}, {54, 1}, {56, 1},
{58, 1}, {60, 1}, {62, 1}, {64, 1}, {66, 1}, {68, 1}, {70, 1},
{72, 1}, {74, 1}, {76, 1}, {78, 1}, {80, 1}, {82, 1}, {84, 1},
{86, 1}, {88, 1}, {90, 1}, {92, 1}, {94, 1}, {96, 1}, {98, 1},
{100, 1}, {102, 1}, {104, 1}, {106, 1}, {108, 1}, {110, 1},
{112, 1}, {114, 1}, {116, 1}, {118, 1}, {120, 1}, {122, 1},
{124, 1}, {126, 1}, {128, 1}, {130, 1}, {132, 1}, {134, 1},
{136, 1}, {138, 1}, {140, 1}, {142, 1}, {144, 1}, {146, 1},
{148, 1}, {150, 1}, {152, 1}, {154, 1}, {156, 1}, {158, 1},
{160, 1}, {162, 1}, {164, 1}, {166, 1}, {168, 1}, {170, 1},
{172, 1}, {174, 1}, {176, 1}, {178, 1}, {180, 1}, {182, 1},
{184, 1}, {186, 1}, {188, 1}, {190, 10},
},
PAXRecords: map[string]string{
"GNU.sparse.major": "1",
"GNU.sparse.minor": "0",
@ -504,7 +436,6 @@ func TestReader(t *testing.T) {
Gname: "dsnet",
AccessTime: time.Unix(1441991948, 0),
ChangeTime: time.Unix(1441973436, 0),
SparseHoles: []SparseEntry{{0, 536870912}},
Format: FormatGNU,
}},
}, {
@ -625,7 +556,6 @@ func TestReader(t *testing.T) {
Typeflag: TypeGNUSparse,
Size: 1000,
ModTime: time.Unix(0, 0),
SparseHoles: []SparseEntry{{Offset: 1000, Length: 0}},
Format: FormatGNU,
}},
}, {
@ -636,7 +566,6 @@ func TestReader(t *testing.T) {
Typeflag: TypeGNUSparse,
Size: 1000,
ModTime: time.Unix(0, 0),
SparseHoles: []SparseEntry{{Offset: 0, Length: 1000}},
Format: FormatGNU,
}},
}, {
@ -647,7 +576,6 @@ func TestReader(t *testing.T) {
Typeflag: TypeReg,
Size: 1000,
ModTime: time.Unix(0, 0),
SparseHoles: []SparseEntry{{Offset: 1000, Length: 0}},
PAXRecords: map[string]string{
"size": "1512",
"GNU.sparse.major": "1",
@ -665,7 +593,6 @@ func TestReader(t *testing.T) {
Typeflag: TypeReg,
Size: 1000,
ModTime: time.Unix(0, 0),
SparseHoles: []SparseEntry{{Offset: 0, Length: 1000}},
PAXRecords: map[string]string{
"size": "512",
"GNU.sparse.major": "1",
@ -935,7 +862,7 @@ func TestReadTruncation(t *testing.T) {
}
cnt++
if s2 == "manual" {
if _, err = tr.WriteTo(ioutil.Discard); err != nil {
if _, err = tr.writeTo(ioutil.Discard); err != nil {
break
}
}
@ -1123,7 +1050,7 @@ func TestReadOldGNUSparseMap(t *testing.T) {
return out
}
makeSparseStrings := func(sp []SparseEntry) (out []string) {
makeSparseStrings := func(sp []sparseEntry) (out []string) {
var f formatter
for _, s := range sp {
var b [24]byte
@ -1377,7 +1304,7 @@ func TestReadGNUSparsePAXHeaders(t *testing.T) {
inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
wantMap: func() (spd sparseDatas) {
for i := 0; i < 100; i++ {
spd = append(spd, SparseEntry{int64(i) << 30, 512})
spd = append(spd, sparseEntry{int64(i) << 30, 512})
}
return spd
}(),

View File

@ -1,77 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"io"
"os"
"runtime"
"syscall"
)
func init() {
sysSparseDetect = sparseDetectUnix
}
func sparseDetectUnix(f *os.File) (sph sparseHoles, err error) {
// SEEK_DATA and SEEK_HOLE originated from Solaris and support for it
// has been added to most of the other major Unix systems.
var seekData, seekHole = 3, 4 // SEEK_DATA/SEEK_HOLE from unistd.h
if runtime.GOOS == "darwin" {
// Darwin has the constants swapped, compared to all other UNIX.
seekData, seekHole = 4, 3
}
// Check for seekData/seekHole support.
// Different OS and FS may differ in the exact errno that is returned when
// there is no support. Rather than special-casing every possible errno
// representing "not supported", just assume that a non-nil error means
// that seekData/seekHole is not supported.
if _, err := f.Seek(0, seekHole); err != nil {
return nil, nil
}
// Populate the SparseHoles.
var last, pos int64 = -1, 0
for {
// Get the location of the next hole section.
if pos, err = fseek(f, pos, seekHole); pos == last || err != nil {
return sph, err
}
offset := pos
last = pos
// Get the location of the next data section.
if pos, err = fseek(f, pos, seekData); pos == last || err != nil {
return sph, err
}
length := pos - offset
last = pos
if length > 0 {
sph = append(sph, SparseEntry{offset, length})
}
}
}
func fseek(f *os.File, pos int64, whence int) (int64, error) {
pos, err := f.Seek(pos, whence)
if errno(err) == syscall.ENXIO {
// SEEK_DATA returns ENXIO when past the last data fragment,
// which makes determining the size of the last hole difficult.
pos, err = f.Seek(0, io.SeekEnd)
}
return pos, err
}
func errno(err error) error {
if perr, ok := err.(*os.PathError); ok {
return perr.Err
}
return err
}

View File

@ -1,129 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package tar
import (
"os"
"syscall"
"unsafe"
)
var errInvalidFunc = syscall.Errno(1) // ERROR_INVALID_FUNCTION from WinError.h
func init() {
sysSparseDetect = sparseDetectWindows
sysSparsePunch = sparsePunchWindows
}
func sparseDetectWindows(f *os.File) (sph sparseHoles, err error) {
const queryAllocRanges = 0x000940CF // FSCTL_QUERY_ALLOCATED_RANGES from WinIoCtl.h
type allocRangeBuffer struct{ offset, length int64 } // FILE_ALLOCATED_RANGE_BUFFER from WinIoCtl.h
s, err := f.Stat()
if err != nil {
return nil, err
}
queryRange := allocRangeBuffer{0, s.Size()}
allocRanges := make([]allocRangeBuffer, 64)
// Repeatedly query for ranges until the input buffer is large enough.
var bytesReturned uint32
for {
err := syscall.DeviceIoControl(
syscall.Handle(f.Fd()), queryAllocRanges,
(*byte)(unsafe.Pointer(&queryRange)), uint32(unsafe.Sizeof(queryRange)),
(*byte)(unsafe.Pointer(&allocRanges[0])), uint32(len(allocRanges)*int(unsafe.Sizeof(allocRanges[0]))),
&bytesReturned, nil,
)
if err == syscall.ERROR_MORE_DATA {
allocRanges = make([]allocRangeBuffer, 2*len(allocRanges))
continue
}
if err == errInvalidFunc {
return nil, nil // Sparse file not supported on this FS
}
if err != nil {
return nil, err
}
break
}
n := bytesReturned / uint32(unsafe.Sizeof(allocRanges[0]))
allocRanges = append(allocRanges[:n], allocRangeBuffer{s.Size(), 0})
// Invert the data fragments into hole fragments.
var pos int64
for _, r := range allocRanges {
if r.offset > pos {
sph = append(sph, SparseEntry{pos, r.offset - pos})
}
pos = r.offset + r.length
}
return sph, nil
}
func sparsePunchWindows(f *os.File, sph sparseHoles) error {
const setSparse = 0x000900C4 // FSCTL_SET_SPARSE from WinIoCtl.h
const setZeroData = 0x000980C8 // FSCTL_SET_ZERO_DATA from WinIoCtl.h
type zeroDataInfo struct{ start, end int64 } // FILE_ZERO_DATA_INFORMATION from WinIoCtl.h
// Set the file as being sparse.
var bytesReturned uint32
devErr := syscall.DeviceIoControl(
syscall.Handle(f.Fd()), setSparse,
nil, 0, nil, 0,
&bytesReturned, nil,
)
if devErr != nil && devErr != errInvalidFunc {
return devErr
}
// Set the file to the right size.
var size int64
if len(sph) > 0 {
size = sph[len(sph)-1].endOffset()
}
if err := f.Truncate(size); err != nil {
return err
}
if devErr == errInvalidFunc {
// Sparse file not supported on this FS.
// Call sparsePunchManual since SetEndOfFile does not guarantee that
// the extended space is filled with zeros.
return sparsePunchManual(f, sph)
}
// Punch holes for all relevant fragments.
for _, s := range sph {
zdi := zeroDataInfo{s.Offset, s.endOffset()}
err := syscall.DeviceIoControl(
syscall.Handle(f.Fd()), setZeroData,
(*byte)(unsafe.Pointer(&zdi)), uint32(unsafe.Sizeof(zdi)),
nil, 0,
&bytesReturned, nil,
)
if err != nil {
return err
}
}
return nil
}
// sparsePunchManual writes zeros into each hole.
func sparsePunchManual(f *os.File, sph sparseHoles) error {
const chunkSize = 32 << 10
zbuf := make([]byte, chunkSize)
for _, s := range sph {
for pos := s.Offset; pos < s.endOffset(); pos += chunkSize {
n := min(chunkSize, s.endOffset()-pos)
if _, err := f.WriteAt(zbuf[:n], pos); err != nil {
return err
}
}
}
return nil
}

View File

@ -16,7 +16,6 @@ import (
"path"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"time"
@ -99,94 +98,94 @@ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
return f.pos, nil
}
func equalSparseEntries(x, y []SparseEntry) bool {
func equalSparseEntries(x, y []sparseEntry) bool {
return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
}
func TestSparseEntries(t *testing.T) {
vectors := []struct {
in []SparseEntry
in []sparseEntry
size int64
wantValid bool // Result of validateSparseEntries
wantAligned []SparseEntry // Result of alignSparseEntries
wantInverted []SparseEntry // Result of invertSparseEntries
wantAligned []sparseEntry // Result of alignSparseEntries
wantInverted []sparseEntry // Result of invertSparseEntries
}{{
in: []SparseEntry{}, size: 0,
in: []sparseEntry{}, size: 0,
wantValid: true,
wantInverted: []SparseEntry{{0, 0}},
wantInverted: []sparseEntry{{0, 0}},
}, {
in: []SparseEntry{}, size: 5000,
in: []sparseEntry{}, size: 5000,
wantValid: true,
wantInverted: []SparseEntry{{0, 5000}},
wantInverted: []sparseEntry{{0, 5000}},
}, {
in: []SparseEntry{{0, 5000}}, size: 5000,
in: []sparseEntry{{0, 5000}}, size: 5000,
wantValid: true,
wantAligned: []SparseEntry{{0, 5000}},
wantInverted: []SparseEntry{{5000, 0}},
wantAligned: []sparseEntry{{0, 5000}},
wantInverted: []sparseEntry{{5000, 0}},
}, {
in: []SparseEntry{{1000, 4000}}, size: 5000,
in: []sparseEntry{{1000, 4000}}, size: 5000,
wantValid: true,
wantAligned: []SparseEntry{{1024, 3976}},
wantInverted: []SparseEntry{{0, 1000}, {5000, 0}},
wantAligned: []sparseEntry{{1024, 3976}},
wantInverted: []sparseEntry{{0, 1000}, {5000, 0}},
}, {
in: []SparseEntry{{0, 3000}}, size: 5000,
in: []sparseEntry{{0, 3000}}, size: 5000,
wantValid: true,
wantAligned: []SparseEntry{{0, 2560}},
wantInverted: []SparseEntry{{3000, 2000}},
wantAligned: []sparseEntry{{0, 2560}},
wantInverted: []sparseEntry{{3000, 2000}},
}, {
in: []SparseEntry{{3000, 2000}}, size: 5000,
in: []sparseEntry{{3000, 2000}}, size: 5000,
wantValid: true,
wantAligned: []SparseEntry{{3072, 1928}},
wantInverted: []SparseEntry{{0, 3000}, {5000, 0}},
wantAligned: []sparseEntry{{3072, 1928}},
wantInverted: []sparseEntry{{0, 3000}, {5000, 0}},
}, {
in: []SparseEntry{{2000, 2000}}, size: 5000,
in: []sparseEntry{{2000, 2000}}, size: 5000,
wantValid: true,
wantAligned: []SparseEntry{{2048, 1536}},
wantInverted: []SparseEntry{{0, 2000}, {4000, 1000}},
wantAligned: []sparseEntry{{2048, 1536}},
wantInverted: []sparseEntry{{0, 2000}, {4000, 1000}},
}, {
in: []SparseEntry{{0, 2000}, {8000, 2000}}, size: 10000,
in: []sparseEntry{{0, 2000}, {8000, 2000}}, size: 10000,
wantValid: true,
wantAligned: []SparseEntry{{0, 1536}, {8192, 1808}},
wantInverted: []SparseEntry{{2000, 6000}, {10000, 0}},
wantAligned: []sparseEntry{{0, 1536}, {8192, 1808}},
wantInverted: []sparseEntry{{2000, 6000}, {10000, 0}},
}, {
in: []SparseEntry{{0, 2000}, {2000, 2000}, {4000, 0}, {4000, 3000}, {7000, 1000}, {8000, 0}, {8000, 2000}}, size: 10000,
in: []sparseEntry{{0, 2000}, {2000, 2000}, {4000, 0}, {4000, 3000}, {7000, 1000}, {8000, 0}, {8000, 2000}}, size: 10000,
wantValid: true,
wantAligned: []SparseEntry{{0, 1536}, {2048, 1536}, {4096, 2560}, {7168, 512}, {8192, 1808}},
wantInverted: []SparseEntry{{10000, 0}},
wantAligned: []sparseEntry{{0, 1536}, {2048, 1536}, {4096, 2560}, {7168, 512}, {8192, 1808}},
wantInverted: []sparseEntry{{10000, 0}},
}, {
in: []SparseEntry{{0, 0}, {1000, 0}, {2000, 0}, {3000, 0}, {4000, 0}, {5000, 0}}, size: 5000,
in: []sparseEntry{{0, 0}, {1000, 0}, {2000, 0}, {3000, 0}, {4000, 0}, {5000, 0}}, size: 5000,
wantValid: true,
wantInverted: []SparseEntry{{0, 5000}},
wantInverted: []sparseEntry{{0, 5000}},
}, {
in: []SparseEntry{{1, 0}}, size: 0,
in: []sparseEntry{{1, 0}}, size: 0,
wantValid: false,
}, {
in: []SparseEntry{{-1, 0}}, size: 100,
in: []sparseEntry{{-1, 0}}, size: 100,
wantValid: false,
}, {
in: []SparseEntry{{0, -1}}, size: 100,
in: []sparseEntry{{0, -1}}, size: 100,
wantValid: false,
}, {
in: []SparseEntry{{0, 0}}, size: -100,
in: []sparseEntry{{0, 0}}, size: -100,
wantValid: false,
}, {
in: []SparseEntry{{math.MaxInt64, 3}, {6, -5}}, size: 35,
in: []sparseEntry{{math.MaxInt64, 3}, {6, -5}}, size: 35,
wantValid: false,
}, {
in: []SparseEntry{{1, 3}, {6, -5}}, size: 35,
in: []sparseEntry{{1, 3}, {6, -5}}, size: 35,
wantValid: false,
}, {
in: []SparseEntry{{math.MaxInt64, math.MaxInt64}}, size: math.MaxInt64,
in: []sparseEntry{{math.MaxInt64, math.MaxInt64}}, size: math.MaxInt64,
wantValid: false,
}, {
in: []SparseEntry{{3, 3}}, size: 5,
in: []sparseEntry{{3, 3}}, size: 5,
wantValid: false,
}, {
in: []SparseEntry{{2, 0}, {1, 0}, {0, 0}}, size: 3,
in: []sparseEntry{{2, 0}, {1, 0}, {0, 0}}, size: 3,
wantValid: false,
}, {
in: []SparseEntry{{1, 3}, {2, 2}}, size: 10,
in: []sparseEntry{{1, 3}, {2, 2}}, size: 10,
wantValid: false,
}}
@ -198,11 +197,11 @@ func TestSparseEntries(t *testing.T) {
if !v.wantValid {
continue
}
gotAligned := alignSparseEntries(append([]SparseEntry{}, v.in...), v.size)
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
if !equalSparseEntries(gotAligned, v.wantAligned) {
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
}
gotInverted := invertSparseEntries(append([]SparseEntry{}, v.in...), v.size)
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
if !equalSparseEntries(gotInverted, v.wantInverted) {
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
}
@ -733,21 +732,6 @@ func TestHeaderAllowedFormats(t *testing.T) {
header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatPAX},
paxHdrs: map[string]string{paxCtime: "123.000000456"},
formats: FormatPAX,
}, {
header: &Header{Name: "sparse.db", Size: 1000, SparseHoles: []SparseEntry{{0, 500}}},
formats: FormatPAX,
}, {
header: &Header{Name: "sparse.db", Size: 1000, Typeflag: TypeGNUSparse, SparseHoles: []SparseEntry{{0, 500}}},
formats: FormatGNU,
}, {
header: &Header{Name: "sparse.db", Size: 1000, SparseHoles: []SparseEntry{{0, 500}}, Format: FormatGNU},
formats: FormatUnknown,
}, {
header: &Header{Name: "sparse.db", Size: 1000, Typeflag: TypeGNUSparse, SparseHoles: []SparseEntry{{0, 500}}, Format: FormatPAX},
formats: FormatUnknown,
}, {
header: &Header{Name: "sparse.db", Size: 1000, SparseHoles: []SparseEntry{{0, 500}}, Format: FormatUSTAR},
formats: FormatUnknown,
}, {
header: &Header{Name: "foo/", Typeflag: TypeDir},
formats: FormatUSTAR | FormatPAX | FormatGNU,
@ -776,140 +760,6 @@ func TestHeaderAllowedFormats(t *testing.T) {
}
}
func TestSparseFiles(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping test on plan9; see https://golang.org/issue/21977")
}
// Only perform the tests for hole-detection on the builders,
// where we have greater control over the filesystem.
sparseSupport := testenv.Builder() != ""
switch runtime.GOOS + "-" + runtime.GOARCH {
case "linux-amd64", "linux-386", "windows-amd64", "windows-386":
default:
sparseSupport = false
}
vectors := []struct {
label string
sparseMap sparseHoles
}{
{"EmptyFile", sparseHoles{{0, 0}}},
{"BigData", sparseHoles{{1e6, 0}}},
{"BigHole", sparseHoles{{0, 1e6}}},
{"DataFront", sparseHoles{{1e3, 1e6 - 1e3}}},
{"HoleFront", sparseHoles{{0, 1e6 - 1e3}, {1e6, 0}}},
{"DataMiddle", sparseHoles{{0, 5e5 - 1e3}, {5e5, 5e5}}},
{"HoleMiddle", sparseHoles{{1e3, 1e6 - 2e3}, {1e6, 0}}},
{"Multiple", func() (sph []SparseEntry) {
const chunkSize = 1e6
for i := 0; i < 100; i++ {
sph = append(sph, SparseEntry{chunkSize * int64(i), chunkSize - 1e3})
}
return append(sph, SparseEntry{int64(len(sph) * chunkSize), 0})
}()},
}
for _, v := range vectors {
sph := v.sparseMap
t.Run(v.label, func(t *testing.T) {
src, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("unexpected TempFile error: %v", err)
}
defer os.Remove(src.Name())
dst, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("unexpected TempFile error: %v", err)
}
defer os.Remove(dst.Name())
// Create the source sparse file.
hdr := Header{
Typeflag: TypeReg,
Name: "sparse.db",
Size: sph[len(sph)-1].endOffset(),
SparseHoles: sph,
}
junk := bytes.Repeat([]byte{'Z'}, int(hdr.Size+1e3))
if _, err := src.Write(junk); err != nil {
t.Fatalf("unexpected Write error: %v", err)
}
if err := hdr.PunchSparseHoles(src); err != nil {
t.Fatalf("unexpected PunchSparseHoles error: %v", err)
}
var pos int64
for _, s := range sph {
b := bytes.Repeat([]byte{'X'}, int(s.Offset-pos))
if _, err := src.WriteAt(b, pos); err != nil {
t.Fatalf("unexpected WriteAt error: %v", err)
}
pos = s.endOffset()
}
// Round-trip the sparse file to/from a tar archive.
b := new(bytes.Buffer)
tw := NewWriter(b)
if err := tw.WriteHeader(&hdr); err != nil {
t.Fatalf("unexpected WriteHeader error: %v", err)
}
if _, err := tw.ReadFrom(src); err != nil {
t.Fatalf("unexpected ReadFrom error: %v", err)
}
if err := tw.Close(); err != nil {
t.Fatalf("unexpected Close error: %v", err)
}
tr := NewReader(b)
if _, err := tr.Next(); err != nil {
t.Fatalf("unexpected Next error: %v", err)
}
if err := hdr.PunchSparseHoles(dst); err != nil {
t.Fatalf("unexpected PunchSparseHoles error: %v", err)
}
if _, err := tr.WriteTo(dst); err != nil {
t.Fatalf("unexpected Copy error: %v", err)
}
// Verify the sparse file matches.
// Even if the OS and underlying FS do not support sparse files,
// the content should still match (i.e., holes read as zeros).
got, err := ioutil.ReadFile(dst.Name())
if err != nil {
t.Fatalf("unexpected ReadFile error: %v", err)
}
want, err := ioutil.ReadFile(src.Name())
if err != nil {
t.Fatalf("unexpected ReadFile error: %v", err)
}
if !bytes.Equal(got, want) {
t.Fatal("sparse files mismatch")
}
// Detect and compare the sparse holes.
if err := hdr.DetectSparseHoles(dst); err != nil {
t.Fatalf("unexpected DetectSparseHoles error: %v", err)
}
if sparseSupport && sysSparseDetect != nil {
if len(sph) > 0 && sph[len(sph)-1].Length == 0 {
sph = sph[:len(sph)-1]
}
if len(hdr.SparseHoles) != len(sph) {
t.Fatalf("len(SparseHoles) = %d, want %d", len(hdr.SparseHoles), len(sph))
}
for j, got := range hdr.SparseHoles {
// Each FS has their own block size, so these may not match.
want := sph[j]
if got.Offset < want.Offset {
t.Errorf("index %d, StartOffset = %d, want <%d", j, got.Offset, want.Offset)
}
if got.endOffset() > want.endOffset() {
t.Errorf("index %d, EndOffset = %d, want >%d", j, got.endOffset(), want.endOffset())
}
}
}
})
}
}
func Benchmark(b *testing.B) {
type file struct {
hdr *Header

View File

@ -10,7 +10,6 @@ import (
"io"
"path"
"sort"
"strconv"
"strings"
"time"
)
@ -46,7 +45,7 @@ type fileWriter interface {
// Flush finishes writing the current file's block padding.
// The current file must be fully written before Flush can be called.
//
// Deprecated: This is unnecessary as the next call to WriteHeader or Close
// This is unnecessary as the next call to WriteHeader or Close
// will implicitly flush out the file's padding.
func (tw *Writer) Flush() error {
if tw.err != nil {
@ -120,11 +119,14 @@ func (tw *Writer) writeUSTARHeader(hdr *Header) error {
func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
realName, realSize := hdr.Name, hdr.Size
// TODO(dsnet): Re-enable this when adding sparse support.
// See https://golang.org/issue/22735
/*
// Handle sparse files.
var spd sparseDatas
var spb []byte
if len(hdr.SparseHoles) > 0 {
sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map
sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
sph = alignSparseEntries(sph, hdr.Size)
spd = invertSparseEntries(sph, hdr.Size)
@ -150,6 +152,8 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
}
*/
_ = realSize
// Write PAX records to the output.
isGlobal := hdr.Typeflag == TypeXGlobalHeader
@ -197,6 +201,9 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
return err
}
// TODO(dsnet): Re-enable this when adding sparse support.
// See https://golang.org/issue/22735
/*
// Write the sparse map and setup the sparse writer if necessary.
if len(spd) > 0 {
// Use tw.curr since the sparse map is accounted for in hdr.Size.
@ -205,6 +212,7 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
}
tw.curr = &sparseFileWriter{tw.curr, spd, 0}
}
*/
return nil
}
@ -235,8 +243,11 @@ func (tw *Writer) writeGNUHeader(hdr *Header) error {
if !hdr.ChangeTime.IsZero() {
f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
}
// TODO(dsnet): Re-enable this when adding sparse support.
// See https://golang.org/issue/22735
/*
if hdr.Typeflag == TypeGNUSparse {
sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map
sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
sph = alignSparseEntries(sph, hdr.Size)
spd = invertSparseEntries(sph, hdr.Size)
@ -269,6 +280,7 @@ func (tw *Writer) writeGNUHeader(hdr *Header) error {
f.formatNumeric(blk.V7().Size(), hdr.Size)
f.formatNumeric(blk.GNU().RealSize(), realSize)
}
*/
blk.SetFormat(FormatGNU)
if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
return err
@ -401,9 +413,6 @@ func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
// Write returns the error ErrWriteTooLong if more than
// Header.Size bytes are written after WriteHeader.
//
// If the current file is sparse, then the regions marked as a hole
// must be written as NUL-bytes.
//
// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
// of what the Header.Size claims.
@ -418,14 +427,17 @@ func (tw *Writer) Write(b []byte) (int, error) {
return n, err
}
// ReadFrom populates the content of the current file by reading from r.
// readFrom populates the content of the current file by reading from r.
// The bytes read must match the number of remaining bytes in the current file.
//
// If the current file is sparse and r is an io.ReadSeeker,
// then ReadFrom uses Seek to skip past holes defined in Header.SparseHoles,
// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are all NULs.
// This always reads the last byte to ensure r is the right size.
func (tw *Writer) ReadFrom(r io.Reader) (int64, error) {
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tw *Writer) readFrom(r io.Reader) (int64, error) {
if tw.err != nil {
return 0, tw.err
}

View File

@ -339,6 +339,9 @@ func TestWriter(t *testing.T) {
}, nil},
testClose{nil},
},
// TODO(dsnet): Re-enable this test when adding sparse support.
// See https://golang.org/issue/22735
/*
}, {
file: "testdata/gnu-nil-sparse-data.tar",
tests: []testFnc{
@ -346,7 +349,7 @@ func TestWriter(t *testing.T) {
Typeflag: TypeGNUSparse,
Name: "sparse.db",
Size: 1000,
SparseHoles: []SparseEntry{{Offset: 1000, Length: 0}},
SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}},
}, nil},
testWrite{strings.Repeat("0123456789", 100), 1000, nil},
testClose{},
@ -358,7 +361,7 @@ func TestWriter(t *testing.T) {
Typeflag: TypeGNUSparse,
Name: "sparse.db",
Size: 1000,
SparseHoles: []SparseEntry{{Offset: 0, Length: 1000}},
SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}},
}, nil},
testWrite{strings.Repeat("\x00", 1000), 1000, nil},
testClose{},
@ -370,7 +373,7 @@ func TestWriter(t *testing.T) {
Typeflag: TypeReg,
Name: "sparse.db",
Size: 1000,
SparseHoles: []SparseEntry{{Offset: 1000, Length: 0}},
SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}},
}, nil},
testWrite{strings.Repeat("0123456789", 100), 1000, nil},
testClose{},
@ -382,7 +385,7 @@ func TestWriter(t *testing.T) {
Typeflag: TypeReg,
Name: "sparse.db",
Size: 1000,
SparseHoles: []SparseEntry{{Offset: 0, Length: 1000}},
SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}},
}, nil},
testWrite{strings.Repeat("\x00", 1000), 1000, nil},
testClose{},
@ -394,7 +397,7 @@ func TestWriter(t *testing.T) {
Typeflag: TypeGNUSparse,
Name: "gnu-sparse",
Size: 6e10,
SparseHoles: []SparseEntry{
SparseHoles: []sparseEntry{
{Offset: 0e10, Length: 1e10 - 100},
{Offset: 1e10, Length: 1e10 - 100},
{Offset: 2e10, Length: 1e10 - 100},
@ -426,7 +429,7 @@ func TestWriter(t *testing.T) {
Typeflag: TypeReg,
Name: "pax-sparse",
Size: 6e10,
SparseHoles: []SparseEntry{
SparseHoles: []sparseEntry{
{Offset: 0e10, Length: 1e10 - 100},
{Offset: 1e10, Length: 1e10 - 100},
{Offset: 2e10, Length: 1e10 - 100},
@ -451,6 +454,7 @@ func TestWriter(t *testing.T) {
}, 6e10, nil},
testClose{nil},
},
*/
}, {
file: "testdata/trailing-slash.tar",
tests: []testFnc{
@ -487,7 +491,7 @@ func TestWriter(t *testing.T) {
}
case testReadFrom:
f := &testFile{ops: tf.ops}
got, err := tw.ReadFrom(f)
got, err := tw.readFrom(f)
if _, ok := err.(testError); ok {
t.Errorf("test %d, ReadFrom(): %v", i, err)
} else if got != tf.wantCnt || !equalError(err, tf.wantErr) {