Mirror of https://gitclone.com/github.com/MetaCubeX/Clash.Meta, synced 2025-05-25 19:38:07 +08:00
Compare commits: 9f7a2a36c1...d5a03901d2 (4 commits)
Commits:
d5a03901d2
257fead538
c489c5260b
8f92b1de13
@@ -118,7 +118,10 @@ func (u *UIUpdater) downloadUI() error {
     tmpDir := C.Path.Resolve("downloadUI.tmp")
     defer os.RemoveAll(tmpDir)
-    extractedFolder, err := extract(data, tmpDir)
+    os.RemoveAll(tmpDir) // cleanup tmp dir before extract
+    log.Debugln("extractedFolder: %s", tmpDir)
+    err = extract(data, tmpDir)
     if err != nil {
         return fmt.Errorf("can't extract compressed file: %w", err)
     }
@@ -136,8 +139,8 @@ func (u *UIUpdater) downloadUI() error {
         return fmt.Errorf("prepare UI path failed: %w", err)
     }
 
-    log.Debugln("moveFolder from %s to %s", extractedFolder, u.externalUIPath)
-    err = moveDir(extractedFolder, u.externalUIPath) // move files from tmp to target
+    log.Debugln("moveFolder from %s to %s", tmpDir, u.externalUIPath)
+    err = moveDir(tmpDir, u.externalUIPath) // move files from tmp to target
     if err != nil {
         return fmt.Errorf("move UI folder failed: %w", err)
     }
@@ -154,63 +157,19 @@ func (u *UIUpdater) prepareUIPath() error {
     return nil
 }
 
-func unzip(data []byte, dest string) (string, error) {
+func unzip(data []byte, dest string) error {
     r, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
     if err != nil {
-        return "", err
+        return err
     }
 
     // check whether or not only exists singleRoot dir
-    rootDir := ""
-    isSingleRoot := true
-    rootItemCount := 0
-    for _, f := range r.File {
-        parts := strings.Split(strings.Trim(f.Name, "/"), "/")
-        if len(parts) == 0 {
-            continue
-        }
-
-        if len(parts) == 1 {
-            isDir := strings.HasSuffix(f.Name, "/")
-            if !isDir {
-                isSingleRoot = false
-                break
-            }
-
-            if rootDir == "" {
-                rootDir = parts[0]
-            }
-            rootItemCount++
-        }
-    }
-
-    if rootItemCount != 1 {
-        isSingleRoot = false
-    }
-
-    // build the dir of extraction
-    var extractedFolder string
-    if isSingleRoot && rootDir != "" {
-        // if the singleRoot, use it directly
-        log.Debugln("Match the singleRoot")
-        extractedFolder = filepath.Join(dest, rootDir)
-        log.Debugln("extractedFolder: %s", extractedFolder)
-    } else {
-        log.Debugln("Match the multiRoot")
-        extractedFolder = dest
-        log.Debugln("extractedFolder: %s", extractedFolder)
-    }
-
     for _, f := range r.File {
-        var fpath string
-        if isSingleRoot && rootDir != "" {
-            fpath = filepath.Join(dest, f.Name)
-        } else {
-            fpath = filepath.Join(extractedFolder, f.Name)
-        }
-
+        fpath := filepath.Join(dest, f.Name)
         if !inDest(fpath, dest) {
-            return "", fmt.Errorf("invalid file path: %s", fpath)
+            return fmt.Errorf("invalid file path: %s", fpath)
         }
         info := f.FileInfo()
         if info.IsDir() {
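Both unzip and untgz keep the inDest(fpath, dest) check that rejects archive entries which would escape the destination directory (zip-slip). The helper itself is not part of this diff; the following is a minimal standalone sketch of such a guard, reusing the inDest name from the call sites above but not taken from the project's source.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// inDest reports whether fpath, after cleaning, still lies inside dest.
// The name mirrors the call sites in the diff; the body is only an illustrative guess.
func inDest(fpath, dest string) bool {
    rel, err := filepath.Rel(dest, fpath)
    if err != nil {
        return false
    }
    // A relative path of ".." or one starting with "../" escapes dest.
    return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

func main() {
    dest := filepath.Join("ui", "downloadUI.tmp")
    fmt.Println(inDest(filepath.Join(dest, "assets", "app.js"), dest)) // true
    fmt.Println(inDest(filepath.Join(dest, "..", "evil.js"), dest))    // false
}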
@@ -221,128 +180,77 @@ func unzip(data []byte, dest string) (string, error) {
             continue // disallow symlink
         }
         if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
-            return "", err
+            return err
         }
         outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
         if err != nil {
-            return "", err
+            return err
         }
         rc, err := f.Open()
         if err != nil {
-            return "", err
+            return err
         }
         _, err = io.Copy(outFile, rc)
         outFile.Close()
         rc.Close()
         if err != nil {
-            return "", err
+            return err
         }
     }
-    return extractedFolder, nil
+    return nil
 }
 
-func untgz(data []byte, dest string) (string, error) {
+func untgz(data []byte, dest string) error {
     gzr, err := gzip.NewReader(bytes.NewReader(data))
     if err != nil {
-        return "", err
+        return err
     }
     defer gzr.Close()
 
     tr := tar.NewReader(gzr)
 
-    rootDir := ""
-    isSingleRoot := true
-    rootItemCount := 0
-    for {
-        header, err := tr.Next()
-        if err == io.EOF {
-            break
-        }
-        if err != nil {
-            return "", err
-        }
-
-        parts := strings.Split(cleanTarPath(header.Name), string(os.PathSeparator))
-        if len(parts) == 0 {
-            continue
-        }
-
-        if len(parts) == 1 {
-            isDir := header.Typeflag == tar.TypeDir
-            if !isDir {
-                isSingleRoot = false
-                break
-            }
-
-            if rootDir == "" {
-                rootDir = parts[0]
-            }
-            rootItemCount++
-        }
-    }
-
-    if rootItemCount != 1 {
-        isSingleRoot = false
-    }
-
     _ = gzr.Reset(bytes.NewReader(data))
     tr = tar.NewReader(gzr)
 
-    var extractedFolder string
-    if isSingleRoot && rootDir != "" {
-        log.Debugln("Match the singleRoot")
-        extractedFolder = filepath.Join(dest, rootDir)
-        log.Debugln("extractedFolder: %s", extractedFolder)
-    } else {
-        log.Debugln("Match the multiRoot")
-        extractedFolder = dest
-        log.Debugln("extractedFolder: %s", extractedFolder)
-    }
-
     for {
         header, err := tr.Next()
         if err == io.EOF {
             break
         }
         if err != nil {
-            return "", err
+            return err
         }
 
-        var fpath string
-        if isSingleRoot && rootDir != "" {
-            fpath = filepath.Join(dest, cleanTarPath(header.Name))
-        } else {
-            fpath = filepath.Join(extractedFolder, cleanTarPath(header.Name))
-        }
-
+        fpath := filepath.Join(dest, header.Name)
         if !inDest(fpath, dest) {
-            return "", fmt.Errorf("invalid file path: %s", fpath)
+            return fmt.Errorf("invalid file path: %s", fpath)
         }
 
         switch header.Typeflag {
         case tar.TypeDir:
             if err = os.MkdirAll(fpath, os.FileMode(header.Mode)); err != nil {
-                return "", err
+                return err
             }
         case tar.TypeReg:
             if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
-                return "", err
+                return err
             }
             outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(header.Mode))
             if err != nil {
-                return "", err
+                return err
             }
             if _, err := io.Copy(outFile, tr); err != nil {
                 outFile.Close()
-                return "", err
+                return err
             }
             outFile.Close()
         }
     }
-    return extractedFolder, nil
+    return nil
 }
 
-func extract(data []byte, dest string) (string, error) {
+func extract(data []byte, dest string) error {
     fileType := detectFileType(data)
     log.Debugln("compression Type: %s", fileType)
     switch fileType {
@@ -351,7 +259,7 @@ func extract(data []byte, dest string) (string, error) {
     case typeTarGzip:
         return untgz(data, dest)
     default:
-        return "", fmt.Errorf("unknown or unsupported file type")
+        return fmt.Errorf("unknown or unsupported file type")
     }
 }
 
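extract dispatches on detectFileType, which is also outside this diff. Archive sniffing of this kind is normally done on magic bytes: ZIP files begin with "PK\x03\x04" and gzip streams with 0x1f 0x8b. Below is a hedged sketch under those assumptions; typeTarGzip appears in the hunk above, while typeZIP and typeUnknown are names invented here for illustration.

package main

import (
    "bytes"
    "fmt"
)

type fileType int

const (
    typeUnknown fileType = iota
    typeZIP
    typeTarGzip
)

// detectFileType sniffs the archive format from its leading magic bytes.
// The real helper in the repository may differ; this is only a sketch.
func detectFileType(data []byte) fileType {
    switch {
    case bytes.HasPrefix(data, []byte{0x50, 0x4B, 0x03, 0x04}): // "PK\x03\x04" -> zip
        return typeZIP
    case bytes.HasPrefix(data, []byte{0x1F, 0x8B}): // gzip member header
        return typeTarGzip
    default:
        return typeUnknown
    }
}

func main() {
    fmt.Println(detectFileType([]byte{0x50, 0x4B, 0x03, 0x04, 0x14})) // 1 (typeZIP)
    fmt.Println(detectFileType([]byte{0x1F, 0x8B, 0x08}))             // 2 (typeTarGzip)
}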
@@ -393,6 +301,15 @@ func moveDir(src string, dst string) error {
         return err
     }
 
+    if len(dirEntryList) == 1 && dirEntryList[0].IsDir() {
+        src = filepath.Join(src, dirEntryList[0].Name())
+        log.Debugln("match the singleRoot: %s", src)
+        dirEntryList, err = os.ReadDir(src)
+        if err != nil {
+            return err
+        }
+    }
+
     for _, dirEntry := range dirEntryList {
         err = os.Rename(filepath.Join(src, dirEntry.Name()), filepath.Join(dst, dirEntry.Name()))
         if err != nil {
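Taken together, these hunks move the single-root detection out of the archive extractors: unzip and untgz now write straight into the temp directory, and moveDir descends into a lone top-level folder before moving entries. The following is a self-contained sketch of that flattening behaviour; moveDir here is a simplified stand-in, not the project's function.

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

// moveDir moves the contents of src into dst. If src holds exactly one
// directory (a "single root" archive layout), it descends into it first,
// mirroring the behaviour added in this commit. Illustrative sketch only.
func moveDir(src, dst string) error {
    entries, err := os.ReadDir(src)
    if err != nil {
        return err
    }
    if len(entries) == 1 && entries[0].IsDir() {
        src = filepath.Join(src, entries[0].Name())
        if entries, err = os.ReadDir(src); err != nil {
            return err
        }
    }
    for _, e := range entries {
        if err := os.Rename(filepath.Join(src, e.Name()), filepath.Join(dst, e.Name())); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    tmp, _ := os.MkdirTemp("", "ui-*")
    defer os.RemoveAll(tmp)
    // Simulate an archive that wraps its files in a single "dist" folder.
    if err := os.MkdirAll(filepath.Join(tmp, "dist"), 0o755); err != nil {
        log.Fatal(err)
    }
    if err := os.WriteFile(filepath.Join(tmp, "dist", "index.html"), []byte("<html/>"), 0o644); err != nil {
        log.Fatal(err)
    }
    dst, _ := os.MkdirTemp("", "ui-dst-*")
    defer os.RemoveAll(dst)
    if err := moveDir(tmp, dst); err != nil {
        log.Fatal(err)
    }
    entries, _ := os.ReadDir(dst)
    for _, e := range entries {
        fmt.Println(e.Name()) // index.html: the wrapping folder was flattened away
    }
}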
@@ -645,6 +645,7 @@ proxies: # socks5
     reality-opts:
       public-key: xxx
       short-id: xxx # optional
+      support-x25519mlkem768: false # can be set to true manually if the server supports it
     client-fingerprint: chrome # cannot be empty
 
   - name: "vless-reality-grpc"
@@ -664,6 +665,7 @@ proxies: # socks5
     reality-opts:
       public-key: CrrQSjAG_YkHLwvM2M-7XkKJilgL5upBKCp0od0tLhE
       short-id: 10f897e26c4b9478
+      support-x25519mlkem768: false # can be set to true manually if the server supports it
 
   - name: "vless-ws"
     type: vless
go.mod (2 changes)
@@ -27,7 +27,7 @@ require (
     github.com/metacubex/randv2 v0.2.0
     github.com/metacubex/sing v0.5.3-0.20250504031621-1f99e54c15b7
     github.com/metacubex/sing-mux v0.3.2
-    github.com/metacubex/sing-quic v0.0.0-20250517090120-462e75d27336
+    github.com/metacubex/sing-quic v0.0.0-20250520025433-6e556a6bef7a
     github.com/metacubex/sing-shadowsocks v0.2.9
     github.com/metacubex/sing-shadowsocks2 v0.2.3
     github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2
go.sum (6 changes)
@@ -120,10 +120,8 @@ github.com/metacubex/sing v0.5.3-0.20250504031621-1f99e54c15b7 h1:m4nSxvw46JEgxM
 github.com/metacubex/sing v0.5.3-0.20250504031621-1f99e54c15b7/go.mod h1:ypf0mjwlZm0sKdQSY+yQvmsbWa0hNPtkeqyRMGgoN+w=
 github.com/metacubex/sing-mux v0.3.2 h1:nJv52pyRivHcaZJKk2JgxpaVvj1GAXG81scSa9N7ncw=
 github.com/metacubex/sing-mux v0.3.2/go.mod h1:3rt1soewn0O6j89GCLmwAQFsq257u0jf2zQSPhTL3Bw=
-github.com/metacubex/sing-quic v0.0.0-20250511034158-b46e0e3e81b2 h1:wfmYgtECbEYo1slMtyo+2kMqscYYDSjU/TVgS3018F4=
-github.com/metacubex/sing-quic v0.0.0-20250511034158-b46e0e3e81b2/go.mod h1:P1kd57U6XXmXv9PbwWdznUGT0k9bKgFJXF0fEORbIlk=
-github.com/metacubex/sing-quic v0.0.0-20250517090120-462e75d27336 h1:5BgpaFkTzkePwF1A8rmhCqgyOMG79BLsAhFR8W8SiRo=
-github.com/metacubex/sing-quic v0.0.0-20250517090120-462e75d27336/go.mod h1:JPTpf7fpnojsSuwRJExhSZSy63pVbp3VM39+zj+sAJM=
+github.com/metacubex/sing-quic v0.0.0-20250520025433-6e556a6bef7a h1:Ho73vGiB94LmtK5T+tKVwtCNEi/YiHmPjlqpHSAmAVs=
+github.com/metacubex/sing-quic v0.0.0-20250520025433-6e556a6bef7a/go.mod h1:JPTpf7fpnojsSuwRJExhSZSy63pVbp3VM39+zj+sAJM=
 github.com/metacubex/sing-shadowsocks v0.2.9 h1:2e++13WNN7EGjGtvrGLUzW1xrCdQbW2gIFpgw5GEw00=
 github.com/metacubex/sing-shadowsocks v0.2.9/go.mod h1:CJSEGO4FWQAWe+ZiLZxCweGdjRR60A61SIoVjdjQeBA=
 github.com/metacubex/sing-shadowsocks2 v0.2.3 h1:v3rNS/5Ywh0NIZ6VU/NmdERQIN5RePzyxCFeQsU4Cx0=
@@ -18,7 +18,6 @@ import (
     "sync"
     "time"
 
-    "github.com/metacubex/mihomo/common/atomic"
     "github.com/metacubex/mihomo/common/buf"
     "github.com/metacubex/mihomo/common/pool"
     "github.com/metacubex/mihomo/component/ech"
@@ -42,16 +41,19 @@ type DialFn = func(ctx context.Context, network, addr string) (net.Conn, error)
 
 type Conn struct {
     initFn func() (io.ReadCloser, netAddr, error)
-    writer io.Writer
+    writer io.Writer // writer must not nil
     closer io.Closer
     netAddr
 
-    reader io.ReadCloser
-    once   sync.Once
-    closed atomic.Bool
-    err    error
+    initOnce sync.Once
+    initErr  error
+    reader   io.ReadCloser
+    br       *bufio.Reader
     remain int
-    br     *bufio.Reader
+
+    closeMutex sync.Mutex
+    closed     bool
 
     // deadlines
     deadline *time.Timer
 }
@@ -65,7 +67,7 @@ type Config struct {
 func (g *Conn) initReader() {
     reader, addr, err := g.initFn()
     if err != nil {
-        g.err = err
+        g.initErr = err
         if closer, ok := g.writer.(io.Closer); ok {
             closer.Close()
         }
@@ -73,17 +75,21 @@ func (g *Conn) initReader() {
     }
     g.netAddr = addr
 
-    if !g.closed.Load() {
-        g.reader = reader
-        g.br = bufio.NewReader(reader)
-    } else {
-        reader.Close()
+    g.closeMutex.Lock()
+    defer g.closeMutex.Unlock()
+    if g.closed { // if g.Close() be called between g.initFn(), direct close the initFn returned reader
+        _ = reader.Close()
+        g.initErr = net.ErrClosed
+        return
     }
+
+    g.reader = reader
+    g.br = bufio.NewReader(reader)
 }
 
 func (g *Conn) Init() error {
-    g.once.Do(g.initReader)
-    return g.err
+    g.initOnce.Do(g.initReader)
+    return g.initErr
 }
 
 func (g *Conn) Read(b []byte) (n int, err error) {
@@ -100,8 +106,6 @@ func (g *Conn) Read(b []byte) (n int, err error) {
         n, err = io.ReadFull(g.br, b[:size])
         g.remain -= n
         return
-    } else if g.reader == nil {
-        return 0, net.ErrClosed
     }
 
     // 0x00 grpclength(uint32) 0x0A uleb128 payload
@@ -147,8 +151,8 @@ func (g *Conn) Write(b []byte) (n int, err error) {
     buf.Write(b)
 
     _, err = g.writer.Write(buf.Bytes())
-    if err == io.ErrClosedPipe && g.err != nil {
-        err = g.err
+    if err == io.ErrClosedPipe && g.initErr != nil {
+        err = g.initErr
     }
 
     if flusher, ok := g.writer.(http.Flusher); ok {
@@ -170,8 +174,8 @@ func (g *Conn) WriteBuffer(buffer *buf.Buffer) error {
     binary.PutUvarint(header[6:], uint64(dataLen))
     _, err := g.writer.Write(buffer.Bytes())
 
-    if err == io.ErrClosedPipe && g.err != nil {
-        err = g.err
+    if err == io.ErrClosedPipe && g.initErr != nil {
+        err = g.initErr
     }
 
     if flusher, ok := g.writer.(http.Flusher); ok {
@@ -186,7 +190,17 @@ func (g *Conn) FrontHeadroom() int {
 }
 
 func (g *Conn) Close() error {
-    g.closed.Store(true)
+    g.initOnce.Do(func() { // if initReader not called, it should not be run anymore
+        g.initErr = net.ErrClosed
+    })
+
+    g.closeMutex.Lock()
+    defer g.closeMutex.Unlock()
+    if g.closed {
+        return nil
+    }
+    g.closed = true
+
     var errorArr []error
 
     if reader := g.reader; reader != nil {
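The last set of hunks replaces the atomic closed flag with closeMutex plus a closed bool so that Close and a concurrent lazy init cannot leak the reader returned by initFn: Close poisons initOnce with net.ErrClosed, and initReader rechecks closed under the mutex before publishing the reader. Below is a reduced sketch of the same pattern, with a trivial initFn standing in for the real stream setup; the type and field names are simplified stand-ins, not the project's code.

package main

import (
    "fmt"
    "io"
    "net"
    "strings"
    "sync"
)

// lazyConn lazily creates its reader on first use, the way Conn does in this diff.
type lazyConn struct {
    initFn func() (io.ReadCloser, error)

    initOnce sync.Once
    initErr  error
    reader   io.ReadCloser

    closeMutex sync.Mutex
    closed     bool
}

func (c *lazyConn) init() {
    reader, err := c.initFn()
    if err != nil {
        c.initErr = err
        return
    }
    c.closeMutex.Lock()
    defer c.closeMutex.Unlock()
    if c.closed { // Close raced with initFn: drop the fresh reader immediately
        _ = reader.Close()
        c.initErr = net.ErrClosed
        return
    }
    c.reader = reader
}

func (c *lazyConn) Init() error {
    c.initOnce.Do(c.init)
    return c.initErr
}

func (c *lazyConn) Close() error {
    c.initOnce.Do(func() { c.initErr = net.ErrClosed }) // poison a not-yet-run init
    c.closeMutex.Lock()
    defer c.closeMutex.Unlock()
    if c.closed {
        return nil
    }
    c.closed = true
    if c.reader != nil {
        return c.reader.Close()
    }
    return nil
}

func main() {
    c := &lazyConn{initFn: func() (io.ReadCloser, error) {
        return io.NopCloser(strings.NewReader("payload")), nil
    }}
    fmt.Println(c.Close())       // <nil>: closing before Init poisons init
    fmt.Println(c.Init() != nil) // true: Init now reports net.ErrClosed
}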