@@ -27,7 +27,11 @@ exports.create = create;
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
-    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
 }) : (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
@@ -40,7 +44,7 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (
 var __importStar = (this && this.__importStar) || function (mod) {
     if (mod && mod.__esModule) return mod;
     var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
     __setModuleDefault(result, mod);
     return result;
 };
@@ -78,9 +82,9 @@ class DefaultArtifactClient {
         return __awaiter(this, void 0, void 0, function* () {
             core.info(`Starting artifact upload
 For more detailed logs during the artifact upload process, enable step-debugging: https://docs.github.com/actions/monitoring-and-troubleshooting-workflows/enabling-debug-logging#enabling-step-debug-logging`);
-            path_and_artifact_name_validation_1.checkArtifactName(name);
+            (0, path_and_artifact_name_validation_1.checkArtifactName)(name);
             // Get specification for the files being uploaded
-            const uploadSpecification = upload_specification_1.getUploadSpecification(name, rootDirectory, files);
+            const uploadSpecification = (0, upload_specification_1.getUploadSpecification)(name, rootDirectory, files);
             const uploadResponse = {
                 artifactName: name,
                 artifactItems: [],
@@ -139,20 +143,20 @@ Note: The size of downloaded zips can differ significantly from the reported siz
             }
             const items = yield downloadHttpClient.getContainerItems(artifactToDownload.name, artifactToDownload.fileContainerResourceUrl);
             if (!path) {
-                path = config_variables_1.getWorkSpaceDirectory();
+                path = (0, config_variables_1.getWorkSpaceDirectory)();
             }
-            path = path_1.normalize(path);
-            path = path_1.resolve(path);
+            path = (0, path_1.normalize)(path);
+            path = (0, path_1.resolve)(path);
             // During upload, empty directories are rejected by the remote server so there should be no artifacts that consist of only empty directories
-            const downloadSpecification = download_specification_1.getDownloadSpecification(name, items.value, path, (options === null || options === void 0 ? void 0 : options.createArtifactFolder) || false);
+            const downloadSpecification = (0, download_specification_1.getDownloadSpecification)(name, items.value, path, (options === null || options === void 0 ? void 0 : options.createArtifactFolder) || false);
             if (downloadSpecification.filesToDownload.length === 0) {
                 core.info(`No downloadable files were found for the artifact: ${artifactToDownload.name}`);
             }
             else {
                 // Create all necessary directories recursively before starting any download
-                yield utils_1.createDirectoriesForArtifact(downloadSpecification.directoryStructure);
-                core.info('Directory structure has been set up for the artifact');
-                yield utils_1.createEmptyFilesForArtifact(downloadSpecification.emptyFilesToCreate);
+                yield (0, utils_1.createDirectoriesForArtifact)(downloadSpecification.directoryStructure);
+                core.info('Directory structure has been set up for the artifact');
+                yield (0, utils_1.createEmptyFilesForArtifact)(downloadSpecification.emptyFilesToCreate);
                 yield downloadHttpClient.downloadSingleArtifact(downloadSpecification.filesToDownload);
             }
             return {
@@ -171,10 +175,10 @@ Note: The size of downloaded zips can differ significantly from the reported siz
                 return response;
             }
             if (!path) {
-                path = config_variables_1.getWorkSpaceDirectory();
+                path = (0, config_variables_1.getWorkSpaceDirectory)();
             }
-            path = path_1.normalize(path);
-            path = path_1.resolve(path);
+            path = (0, path_1.normalize)(path);
+            path = (0, path_1.resolve)(path);
             let downloadedArtifacts = 0;
             while (downloadedArtifacts < artifacts.count) {
                 const currentArtifactToDownload = artifacts.value[downloadedArtifacts];
@@ -182,13 +186,13 @@ Note: The size of downloaded zips can differ significantly from the reported siz
                 core.info(`starting download of artifact ${currentArtifactToDownload.name} : ${downloadedArtifacts}/${artifacts.count}`);
                 // Get container entries for the specific artifact
                 const items = yield downloadHttpClient.getContainerItems(currentArtifactToDownload.name, currentArtifactToDownload.fileContainerResourceUrl);
-                const downloadSpecification = download_specification_1.getDownloadSpecification(currentArtifactToDownload.name, items.value, path, true);
+                const downloadSpecification = (0, download_specification_1.getDownloadSpecification)(currentArtifactToDownload.name, items.value, path, true);
                 if (downloadSpecification.filesToDownload.length === 0) {
                     core.info(`No downloadable files were found for any artifact ${currentArtifactToDownload.name}`);
                 }
                 else {
-                    yield utils_1.createDirectoriesForArtifact(downloadSpecification.directoryStructure);
-                    yield utils_1.createEmptyFilesForArtifact(downloadSpecification.emptyFilesToCreate);
+                    yield (0, utils_1.createDirectoriesForArtifact)(downloadSpecification.directoryStructure);
+                    yield (0, utils_1.createEmptyFilesForArtifact)(downloadSpecification.emptyFilesToCreate);
                     yield downloadHttpClient.downloadSingleArtifact(downloadSpecification.filesToDownload);
                 }
                 response.push({
@@ -211,7 +215,7 @@ exports.DefaultArtifactClient = DefaultArtifactClient;
 "use strict";
 Object.defineProperty(exports, "__esModule", ({ value: true }));
-exports.getRetentionDays = exports.getWorkSpaceDirectory = exports.getWorkFlowRunId = exports.getRuntimeUrl = exports.getRuntimeToken = exports.getDownloadFileConcurrency = exports.getInitialRetryIntervalInMilliseconds = exports.getRetryMultiplier = exports.getRetryLimit = exports.getUploadChunkSize = exports.getUploadFileConcurrency = void 0;
+exports.isGhes = exports.getRetentionDays = exports.getWorkSpaceDirectory = exports.getWorkFlowRunId = exports.getRuntimeUrl = exports.getRuntimeToken = exports.getDownloadFileConcurrency = exports.getInitialRetryIntervalInMilliseconds = exports.getRetryMultiplier = exports.getRetryLimit = exports.getUploadChunkSize = exports.getUploadFileConcurrency = void 0;
 // The number of concurrent uploads that happens at the same time
 function getUploadFileConcurrency() {
     return 2;
@@ -280,6 +284,11 @@ function getRetentionDays() {
     return process.env['GITHUB_RETENTION_DAYS'];
 }
 exports.getRetentionDays = getRetentionDays;
+function isGhes() {
+    const ghUrl = new URL(process.env['GITHUB_SERVER_URL'] || 'https://github.com');
+    return ghUrl.hostname.toUpperCase() !== 'GITHUB.COM';
+}
+exports.isGhes = isGhes;
 //# sourceMappingURL=config-variables.js.map
 /***/ }),
@@ -601,7 +610,11 @@ exports["default"] = CRC64;
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
-    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
 }) : (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
@@ -614,7 +627,7 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (
 var __importStar = (this && this.__importStar) || function (mod) {
     if (mod && mod.__esModule) return mod;
     var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
     __setModuleDefault(result, mod);
     return result;
 };
@@ -641,7 +654,7 @@ const config_variables_1 = __nccwpck_require__(2222);
 const requestUtils_1 = __nccwpck_require__(755);
 class DownloadHttpClient {
     constructor() {
-        this.downloadHttpManager = new http_manager_1.HttpManager(config_variables_1.getDownloadFileConcurrency(), '@actions/artifact-download');
+        this.downloadHttpManager = new http_manager_1.HttpManager((0, config_variables_1.getDownloadFileConcurrency)(), '@actions/artifact-download');
         // downloads are usually significantly faster than uploads so display status information every second
         this.statusReporter = new status_reporter_1.StatusReporter(1000);
     }
@@ -650,11 +663,11 @@ class DownloadHttpClient {
      */
     listArtifacts() {
         return __awaiter(this, void 0, void 0, function* () {
-            const artifactUrl = utils_1.getArtifactUrl();
+            const artifactUrl = (0, utils_1.getArtifactUrl)();
             // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
             const client = this.downloadHttpManager.getClient(0);
-            const headers = utils_1.getDownloadHeaders('application/json');
-            const response = yield requestUtils_1.retryHttpClientRequest('List Artifacts', () => __awaiter(this, void 0, void 0, function* () { return client.get(artifactUrl, headers); }));
+            const headers = (0, utils_1.getDownloadHeaders)('application/json');
+            const response = yield (0, requestUtils_1.retryHttpClientRequest)('List Artifacts', () => __awaiter(this, void 0, void 0, function* () { return client.get(artifactUrl, headers); }));
             const body = yield response.readBody();
             return JSON.parse(body);
         });
@@ -671,8 +684,8 @@ class DownloadHttpClient {
             resourceUrl.searchParams.append('itemPath', artifactName);
             // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
             const client = this.downloadHttpManager.getClient(0);
-            const headers = utils_1.getDownloadHeaders('application/json');
-            const response = yield requestUtils_1.retryHttpClientRequest('Get Container Items', () => __awaiter(this, void 0, void 0, function* () { return client.get(resourceUrl.toString(), headers); }));
+            const headers = (0, utils_1.getDownloadHeaders)('application/json');
+            const response = yield (0, requestUtils_1.retryHttpClientRequest)('Get Container Items', () => __awaiter(this, void 0, void 0, function* () { return client.get(resourceUrl.toString(), headers); }));
             const body = yield response.readBody();
             return JSON.parse(body);
         });
@@ -683,7 +696,7 @@ class DownloadHttpClient {
      */
     downloadSingleArtifact(downloadItems) {
         return __awaiter(this, void 0, void 0, function* () {
-            const DOWNLOAD_CONCURRENCY = config_variables_1.getDownloadFileConcurrency();
+            const DOWNLOAD_CONCURRENCY = (0, config_variables_1.getDownloadFileConcurrency)();
             // limit the number of files downloaded at a single time
             core.debug(`Download file concurrency is set to ${DOWNLOAD_CONCURRENCY}`);
             const parallelDownloads = [...new Array(DOWNLOAD_CONCURRENCY).keys()];
@@ -723,9 +736,9 @@ class DownloadHttpClient {
     downloadIndividualFile(httpClientIndex, artifactLocation, downloadPath) {
         return __awaiter(this, void 0, void 0, function* () {
             let retryCount = 0;
-            const retryLimit = config_variables_1.getRetryLimit();
+            const retryLimit = (0, config_variables_1.getRetryLimit)();
             let destinationStream = fs.createWriteStream(downloadPath);
-            const headers = utils_1.getDownloadHeaders('application/json', true, true);
+            const headers = (0, utils_1.getDownloadHeaders)('application/json', true, true);
             // a single GET request is used to download a file
             const makeDownloadRequest = () => __awaiter(this, void 0, void 0, function* () {
                 const client = this.downloadHttpManager.getClient(httpClientIndex);
@@ -749,13 +762,13 @@ class DownloadHttpClient {
                 if (retryAfterValue) {
                     // Back off by waiting the specified time denoted by the retry-after header
                     core.info(`Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the download`);
-                    yield utils_1.sleep(retryAfterValue);
+                    yield (0, utils_1.sleep)(retryAfterValue);
                 }
                 else {
                     // Back off using an exponential value that depends on the retry count
-                    const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(retryCount);
+                    const backoffTime = (0, utils_1.getExponentialRetryTimeInMilliseconds)(retryCount);
                     core.info(`Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the download`);
-                    yield utils_1.sleep(backoffTime);
+                    yield (0, utils_1.sleep)(backoffTime);
                 }
                 core.info(`Finished backoff for retry #${retryCount}, continuing with download`);
             }
@@ -779,7 +792,7 @@ class DownloadHttpClient {
                         resolve();
                     }
                 });
-                yield utils_1.rmFile(fileDownloadPath);
+                yield (0, utils_1.rmFile)(fileDownloadPath);
                 destinationStream = fs.createWriteStream(fileDownloadPath);
             });
             // keep trying to download a file until a retry limit has been reached
@@ -798,7 +811,7 @@ Note: The size of downloaded zips can differ significantly from the reported siz
                     continue;
                 }
                 let forceRetry = false;
-                if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
+                if ((0, utils_1.isSuccessStatusCode)(response.message.statusCode)) {
                     // The body contains the contents of the file however calling response.readBody() causes all the content to be converted to a string
                     // which can cause some gzip encoded data to be lost
                     // Instead of using response.readBody(), response.message is a readableStream that can be directly used to get the raw body contents
@@ -806,7 +819,7 @@ Note: The size of downloaded zips can differ significantly from the reported siz
                     const isGzipped = isGzip(response.message.headers);
                     yield this.pipeResponseToFile(response, destinationStream, isGzipped);
                     if (isGzipped ||
-                        isAllBytesReceived(response.message.headers['content-length'], yield utils_1.getFileSize(downloadPath))) {
+                        isAllBytesReceived(response.message.headers['content-length'], yield (0, utils_1.getFileSize)(downloadPath))) {
                         return;
                     }
                     else {
@@ -818,17 +831,17 @@ Note: The size of downloaded zips can differ significantly from the reported siz
                         forceRetry = true;
                     }
                 }
-                if (forceRetry || utils_1.isRetryableStatusCode(response.message.statusCode)) {
+                if (forceRetry || (0, utils_1.isRetryableStatusCode)(response.message.statusCode)) {
                     core.info(`A ${response.message.statusCode} response code has been received while attempting to download an artifact`);
                     resetDestinationStream(downloadPath);
                     // if a throttled status code is received, try to get the retryAfter header value, else differ to standard exponential backoff
-                    utils_1.isThrottledStatusCode(response.message.statusCode)
-                        ? yield backOff(utils_1.tryGetRetryAfterValueTimeInMilliseconds(response.message.headers))
+                    (0, utils_1.isThrottledStatusCode)(response.message.statusCode)
+                        ? yield backOff((0, utils_1.tryGetRetryAfterValueTimeInMilliseconds)(response.message.headers))
                         : yield backOff();
                 }
                 else {
                     // Some unexpected response code, fail immediately and stop the download
-                    utils_1.displayHttpDiagnostics(response);
+                    (0, utils_1.displayHttpDiagnostics)(response);
                     return Promise.reject(new Error(`Unexpected http ${response.message.statusCode} during download for ${artifactLocation}`));
                 }
             }
@@ -847,14 +860,14 @@ class DownloadHttpClient {
                     const gunzip = zlib.createGunzip();
                     response.message
                         .on('error', error => {
-                        core.error(`An error occurred while attempting to read the response stream`);
+                        core.info(`An error occurred while attempting to read the response stream`);
                         gunzip.close();
                         destinationStream.close();
                         reject(error);
                     })
                         .pipe(gunzip)
                         .on('error', error => {
-                        core.error(`An error occurred while attempting to decompress the response stream`);
+                        core.info(`An error occurred while attempting to decompress the response stream`);
                         destinationStream.close();
                         reject(error);
                     })
@@ -863,14 +876,14 @@ Note: The size of downloaded zips can differ significantly from the reported siz
                         resolve();
                     })
                         .on('error', error => {
-                        core.error(`An error occurred while writing a downloaded file to ${destinationStream.path}`);
+                        core.info(`An error occurred while writing a downloaded file to ${destinationStream.path}`);
                         reject(error);
                     });
                 }
                 else {
                     response.message
                         .on('error', error => {
-                        core.error(`An error occurred while attempting to read the response stream`);
+                        core.info(`An error occurred while attempting to read the response stream`);
                         destinationStream.close();
                         reject(error);
                     })
@@ -879,7 +892,7 @@ Note: The size of downloaded zips can differ significantly from the reported siz
                         resolve();
                     })
                         .on('error', error => {
-                        core.error(`An error occurred while writing a downloaded file to ${destinationStream.path}`);
+                        core.info(`An error occurred while writing a downloaded file to ${destinationStream.path}`);
                         reject(error);
                     });
                 }
@@ -900,7 +913,11 @@ exports.DownloadHttpClient = DownloadHttpClient;
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
-    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
 }) : (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
@@ -913,7 +930,7 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (
 var __importStar = (this && this.__importStar) || function (mod) {
     if (mod && mod.__esModule) return mod;
     var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
     __setModuleDefault(result, mod);
     return result;
 };
@@ -991,7 +1008,7 @@ class HttpManager {
             throw new Error('There must be at least one client');
         }
         this.userAgent = userAgent;
-        this.clients = new Array(clientCount).fill(utils_1.createHttpClient(userAgent));
+        this.clients = new Array(clientCount).fill((0, utils_1.createHttpClient)(userAgent));
     }
     getClient(index) {
         return this.clients[index];
@@ -1000,7 +1017,7 @@ class HttpManager {
     // for more information see: https://github.com/actions/http-client/blob/04e5ad73cd3fd1f5610a32116b0759eddf6570d2/index.ts#L292
     disposeAndReplaceClient(index) {
         this.clients[index].dispose();
-        this.clients[index] = utils_1.createHttpClient(this.userAgent);
+        this.clients[index] = (0, utils_1.createHttpClient)(this.userAgent);
     }
     disposeAndReplaceAllClients() {
         for (const [index] of this.clients.entries()) {
@@ -1061,7 +1078,7 @@ Invalid characters include: ${Array.from(invalidArtifactNameCharacters.values())
 These characters are not allowed in the artifact name due to limitations with certain file systems such as NTFS. To maintain file system agnostic behavior, these characters are intentionally not allowed to prevent potential problems with downloads on different file systems.`);
         }
     }
-    core_1.info(`Artifact name is valid!`);
+    (0, core_1.info)(`Artifact name is valid!`);
 }
 exports.checkArtifactName = checkArtifactName;
 /**
@@ -1094,7 +1111,11 @@ exports.checkArtifactFilePath = checkArtifactFilePath;
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
-    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
 }) : (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
@@ -1107,7 +1128,7 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (
 var __importStar = (this && this.__importStar) || function (mod) {
     if (mod && mod.__esModule) return mod;
     var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
     __setModuleDefault(result, mod);
     return result;
 };
@@ -1137,14 +1158,14 @@ function retry(name, operation, customErrorMessages, maxAttempts) {
             try {
                 response = yield operation();
                 statusCode = response.message.statusCode;
-                if (utils_1.isSuccessStatusCode(statusCode)) {
+                if ((0, utils_1.isSuccessStatusCode)(statusCode)) {
                     return response;
                 }
                 // Extra error information that we want to display if a particular response code is hit
                 if (statusCode) {
                     customErrorInformation = customErrorMessages.get(statusCode);
                 }
-                isRetryable = utils_1.isRetryableStatusCode(statusCode);
+                isRetryable = (0, utils_1.isRetryableStatusCode)(statusCode);
                 errorMessage = `Artifact service responded with ${statusCode}`;
             }
             catch (error) {
@@ -1154,16 +1175,16 @@ function retry(name, operation, customErrorMessages, maxAttempts) {
             if (!isRetryable) {
                 core.info(`${name} - Error is not retryable`);
                 if (response) {
-                    utils_1.displayHttpDiagnostics(response);
+                    (0, utils_1.displayHttpDiagnostics)(response);
                 }
                 break;
             }
             core.info(`${name} - Attempt ${attempt} of ${maxAttempts} failed with error: ${errorMessage}`);
-            yield utils_1.sleep(utils_1.getExponentialRetryTimeInMilliseconds(attempt));
+            yield (0, utils_1.sleep)((0, utils_1.getExponentialRetryTimeInMilliseconds)(attempt));
             attempt++;
         }
         if (response) {
-            utils_1.displayHttpDiagnostics(response);
+            (0, utils_1.displayHttpDiagnostics)(response);
         }
         if (customErrorInformation) {
             throw Error(`${name} failed: ${customErrorInformation}`);
@@ -1172,7 +1193,7 @@ function retry(name, operation, customErrorMessages, maxAttempts) {
     });
 }
 exports.retry = retry;
-function retryHttpClientRequest(name, method, customErrorMessages = new Map(), maxAttempts = config_variables_1.getRetryLimit()) {
+function retryHttpClientRequest(name, method, customErrorMessages = new Map(), maxAttempts = (0, config_variables_1.getRetryLimit)()) {
     return __awaiter(this, void 0, void 0, function* () {
         return yield retry(name, method, customErrorMessages, maxAttempts);
     });
@@ -1214,14 +1235,14 @@ class StatusReporter {
         this.totalFileStatus = setInterval(() => {
             // display 1 decimal place without any rounding
             const percentage = this.formatPercentage(this.processedCount, this.totalNumberOfFilesToProcess);
-            core_1.info(`Total file count: ${this.totalNumberOfFilesToProcess} ---- Processed file #${this.processedCount} (${percentage.slice(0, percentage.indexOf('.') + 2)}%)`);
+            (0, core_1.info)(`Total file count: ${this.totalNumberOfFilesToProcess} ---- Processed file #${this.processedCount} (${percentage.slice(0, percentage.indexOf('.') + 2)}%)`);
         }, this.displayFrequencyInMilliseconds);
     }
     // if there is a large file that is being uploaded in chunks, this is used to display extra information about the status of the upload
     updateLargeFileStatus(fileName, chunkStartIndex, chunkEndIndex, totalUploadFileSize) {
         // display 1 decimal place without any rounding
         const percentage = this.formatPercentage(chunkEndIndex, totalUploadFileSize);
-        core_1.info(`Uploaded ${fileName} (${percentage.slice(0, percentage.indexOf('.') + 2)}%) bytes ${chunkStartIndex}:${chunkEndIndex}`);
+        (0, core_1.info)(`Uploaded ${fileName} (${percentage.slice(0, percentage.indexOf('.') + 2)}%) bytes ${chunkStartIndex}:${chunkEndIndex}`);
     }
     stop() {
         if (this.totalFileStatus) {
@@ -1248,7 +1269,11 @@ exports.StatusReporter = StatusReporter;
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
-    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
 }) : (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
@@ -1261,7 +1286,7 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (
 var __importStar = (this && this.__importStar) || function (mod) {
     if (mod && mod.__esModule) return mod;
     var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
     __setModuleDefault(result, mod);
     return result;
 };
@@ -1286,19 +1311,34 @@ exports.createGZipFileInBuffer = exports.createGZipFileOnDisk = void 0;
 const fs = __importStar(__nccwpck_require__(7147));
 const zlib = __importStar(__nccwpck_require__(9796));
 const util_1 = __nccwpck_require__(3837);
-const stat = util_1.promisify(fs.stat);
+const stat = (0, util_1.promisify)(fs.stat);
 /**
  * GZipping certain files that are already compressed will likely not yield further size reductions. Creating large temporary gzip
  * files then will just waste a lot of time before ultimately being discarded (especially for very large files).
  * If any of these types of files are encountered then on-disk gzip creation will be skipped and the original file will be uploaded as-is
  */
 const gzipExemptFileExtensions = [
+    '.gz',
     '.gzip',
+    '.tgz',
+    '.taz',
+    '.Z',
+    '.taZ',
+    '.bz2',
+    '.tbz',
+    '.tbz2',
+    '.tz2',
+    '.lz',
+    '.lzma',
+    '.tlz',
+    '.lzo',
+    '.xz',
+    '.txz',
+    '.zst',
+    '.zstd',
+    '.tzst',
     '.zip',
-    '.tar.lz',
-    '.tar.gz',
-    '.tar.bz2',
-    '.7z'
+    '.7z' // 7ZIP
 ];
 /**
  * Creates a Gzip compressed file of an original file at the provided temporary filepath location
@@ -1327,7 +1367,7 @@ function createGZipFileOnDisk(originalFilePath, tempFilePath) {
             outputStream.on('error', error => {
                 // eslint-disable-next-line no-console
                 console.log(error);
-                reject;
+                reject(error);
             });
         });
     });
@@ -1341,22 +1381,29 @@ exports.createGZipFileOnDisk = createGZipFileOnDisk;
 function createGZipFileInBuffer(originalFilePath) {
     return __awaiter(this, void 0, void 0, function* () {
         return new Promise((resolve) => __awaiter(this, void 0, void 0, function* () {
-            var e_1, _a;
+            var _a, e_1, _b, _c;
             const inputStream = fs.createReadStream(originalFilePath);
             const gzip = zlib.createGzip();
             inputStream.pipe(gzip);
             // read stream into buffer, using experimental async iterators see https://github.com/nodejs/readable-stream/issues/403#issuecomment-479069043
             const chunks = [];
             try {
-                for (var gzip_1 = __asyncValues(gzip), gzip_1_1; gzip_1_1 = yield gzip_1.next(), !gzip_1_1.done;) {
-                    const chunk = gzip_1_1.value;
-                    chunks.push(chunk);
+                for (var _d = true, gzip_1 = __asyncValues(gzip), gzip_1_1; gzip_1_1 = yield gzip_1.next(), _a = gzip_1_1.done, !_a;) {
+                    _c = gzip_1_1.value;
+                    _d = false;
+                    try {
+                        const chunk = _c;
+                        chunks.push(chunk);
+                    }
+                    finally {
+                        _d = true;
+                    }
                 }
             }
             catch (e_1_1) { e_1 = { error: e_1_1 }; }
             finally {
                 try {
-                    if (gzip_1_1 && !gzip_1_1.done && (_a = gzip_1.return)) yield _a.call(gzip_1);
+                    if (!_d && !_a && (_b = gzip_1.return)) yield _b.call(gzip_1);
                 }
                 finally { if (e_1) throw e_1.error; }
             }
@@ -1376,7 +1423,11 @@ exports.createGZipFileInBuffer = createGZipFileInBuffer;
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
-    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
 }) : (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
@@ -1389,7 +1440,7 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (
 var __importStar = (this && this.__importStar) || function (mod) {
     if (mod && mod.__esModule) return mod;
     var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
     __setModuleDefault(result, mod);
     return result;
 };
@@ -1418,10 +1469,10 @@ const http_client_1 = __nccwpck_require__(6255);
 const http_manager_1 = __nccwpck_require__(6527);
 const upload_gzip_1 = __nccwpck_require__(606);
 const requestUtils_1 = __nccwpck_require__(755);
-const stat = util_1.promisify(fs.stat);
+const stat = (0, util_1.promisify)(fs.stat);
 class UploadHttpClient {
     constructor() {
-        this.uploadHttpManager = new http_manager_1.HttpManager(config_variables_1.getUploadFileConcurrency(), '@actions/artifact-upload');
+        this.uploadHttpManager = new http_manager_1.HttpManager((0, config_variables_1.getUploadFileConcurrency)(), '@actions/artifact-upload');
         this.statusReporter = new status_reporter_1.StatusReporter(10000);
     }
     /**
@@ -1437,28 +1488,30 @@ class UploadHttpClient {
             };
             // calculate retention period
             if (options && options.retentionDays) {
-                const maxRetentionStr = config_variables_1.getRetentionDays();
-                parameters.RetentionDays = utils_1.getProperRetention(options.retentionDays, maxRetentionStr);
+                const maxRetentionStr = (0, config_variables_1.getRetentionDays)();
+                parameters.RetentionDays = (0, utils_1.getProperRetention)(options.retentionDays, maxRetentionStr);
             }
             const data = JSON.stringify(parameters, null, 2);
-            const artifactUrl = utils_1.getArtifactUrl();
+            const artifactUrl = (0, utils_1.getArtifactUrl)();
             // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
             const client = this.uploadHttpManager.getClient(0);
-            const headers = utils_1.getUploadHeaders('application/json', false);
+            const headers = (0, utils_1.getUploadHeaders)('application/json', false);
             // Extra information to display when a particular HTTP code is returned
             // If a 403 is returned when trying to create a file container, the customer has exceeded
             // their storage quota so no new artifact containers can be created
             const customErrorMessages = new Map([
                 [
                     http_client_1.HttpCodes.Forbidden,
-                    'Artifact storage quota has been hit. Unable to upload any new artifacts'
+                    (0, config_variables_1.isGhes)()
+                        ? 'Please reference [Enabling GitHub Actions for GitHub Enterprise Server](https://docs.github.com/en/enterprise-server@3.8/admin/github-actions/enabling-github-actions-for-github-enterprise-server) to ensure Actions storage is configured correctly.'
+                        : 'Artifact storage quota has been hit. Unable to upload any new artifacts'
                 ],
                 [
                     http_client_1.HttpCodes.BadRequest,
                     `The artifact name ${artifactName} is not valid. Request URL ${artifactUrl}`
                 ]
             ]);
-            const response = yield requestUtils_1.retryHttpClientRequest('Create Artifact Container', () => __awaiter(this, void 0, void 0, function* () { return client.post(artifactUrl, data, headers); }), customErrorMessages);
+            const response = yield (0, requestUtils_1.retryHttpClientRequest)('Create Artifact Container', () => __awaiter(this, void 0, void 0, function* () { return client.post(artifactUrl, data, headers); }), customErrorMessages);
             const body = yield response.readBody();
             return JSON.parse(body);
         });
@@ -1471,8 +1524,8 @@ class UploadHttpClient {
      */
     uploadArtifactToFileContainer(uploadUrl, filesToUpload, options) {
         return __awaiter(this, void 0, void 0, function* () {
-            const FILE_CONCURRENCY = config_variables_1.getUploadFileConcurrency();
-            const MAX_CHUNK_SIZE = config_variables_1.getUploadChunkSize();
+            const FILE_CONCURRENCY = (0, config_variables_1.getUploadFileConcurrency)();
+            const MAX_CHUNK_SIZE = (0, config_variables_1.getUploadChunkSize)();
             core.debug(`File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`);
             const parameters = [];
             // by default, file uploads will continue if there is an error unless specified differently in the options
@@ -1562,7 +1615,7 @@ class UploadHttpClient {
             // with named pipes the file size is reported as zero in that case don't read the file in memory
             if (!isFIFO && totalFileSize < 65536) {
                 core.debug(`${parameters.file} is less than 64k in size. Creating a gzip file in-memory to potentially reduce the upload size`);
-                const buffer = yield upload_gzip_1.createGZipFileInBuffer(parameters.file);
+                const buffer = yield (0, upload_gzip_1.createGZipFileInBuffer)(parameters.file);
                 // An open stream is needed in the event of a failure and we need to retry. If a NodeJS.ReadableStream is directly passed in,
                 // it will not properly get reset to the start of the stream if a chunk upload needs to be retried
                 let openUploadStream;
@@ -1602,7 +1655,7 @@ class UploadHttpClient {
                 const tempFile = yield tmp.file();
                 core.debug(`${parameters.file} is greater than 64k in size. Creating a gzip file on-disk ${tempFile.path} to potentially reduce the upload size`);
                 // create a GZip file of the original file being uploaded, the original file should not be modified in any way
-                uploadFileSize = yield upload_gzip_1.createGZipFileOnDisk(parameters.file, tempFile.path);
+                uploadFileSize = yield (0, upload_gzip_1.createGZipFileOnDisk)(parameters.file, tempFile.path);
                 let uploadFilePath = tempFile.path;
                 // compression did not help with size reduction, use the original file for upload and delete the temp GZip file
                 // for named pipes totalFileSize is zero, this assumes compression did help
@@ -1675,22 +1728,22 @@ class UploadHttpClient {
     uploadChunk(httpClientIndex, resourceUrl, openStream, start, end, uploadFileSize, isGzip, totalFileSize) {
         return __awaiter(this, void 0, void 0, function* () {
             // open a new stream and read it to compute the digest
-            const digest = yield utils_1.digestForStream(openStream());
+            const digest = yield (0, utils_1.digestForStream)(openStream());
             // prepare all the necessary headers before making any http call
-            const headers = utils_1.getUploadHeaders('application/octet-stream', true, isGzip, totalFileSize, end - start + 1, utils_1.getContentRange(start, end, uploadFileSize), digest);
+            const headers = (0, utils_1.getUploadHeaders)('application/octet-stream', true, isGzip, totalFileSize, end - start + 1, (0, utils_1.getContentRange)(start, end, uploadFileSize), digest);
             const uploadChunkRequest = () => __awaiter(this, void 0, void 0, function* () {
                 const client = this.uploadHttpManager.getClient(httpClientIndex);
                 return yield client.sendStream('PUT', resourceUrl, openStream(), headers);
             });
             let retryCount = 0;
-            const retryLimit = config_variables_1.getRetryLimit();
+            const retryLimit = (0, config_variables_1.getRetryLimit)();
             // Increments the current retry count and then checks if the retry limit has been reached
             // If there have been too many retries, fail so the download stops
             const incrementAndCheckRetryLimit = (response) => {
                 retryCount++;
                 if (retryCount > retryLimit) {
                     if (response) {
-                        utils_1.displayHttpDiagnostics(response);
+                        (0, utils_1.displayHttpDiagnostics)(response);
                     }
                     core.info(`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`);
                     return true;
@@ -1701,12 +1754,12 @@ class UploadHttpClient {
                 this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex);
                 if (retryAfterValue) {
                     core.info(`Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the upload`);
-                    yield utils_1.sleep(retryAfterValue);
+                    yield (0, utils_1.sleep)(retryAfterValue);
                 }
                 else {
-                    const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(retryCount);
+                    const backoffTime = (0, utils_1.getExponentialRetryTimeInMilliseconds)(retryCount);
                     core.info(`Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the upload at offset ${start}`);
-                    yield utils_1.sleep(backoffTime);
+                    yield (0, utils_1.sleep)(backoffTime);
                 }
                 core.info(`Finished backoff for retry #${retryCount}, continuing with upload`);
                 return;
@@ -1731,21 +1784,21 @@ class UploadHttpClient {
                 // Always read the body of the response. There is potential for a resource leak if the body is not read which will
                 // result in the connection remaining open along with unintended consequences when trying to dispose of the client
                 yield response.readBody();
-                if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
+                if ((0, utils_1.isSuccessStatusCode)(response.message.statusCode)) {
                     return true;
                 }
-                else if (utils_1.isRetryableStatusCode(response.message.statusCode)) {
+                else if ((0, utils_1.isRetryableStatusCode)(response.message.statusCode)) {
                     core.info(`A ${response.message.statusCode} status code has been received, will attempt to retry the upload`);
                     if (incrementAndCheckRetryLimit(response)) {
                         return false;
                     }
-                    utils_1.isThrottledStatusCode(response.message.statusCode)
-                        ? yield backOff(utils_1.tryGetRetryAfterValueTimeInMilliseconds(response.message.headers))
+                    (0, utils_1.isThrottledStatusCode)(response.message.statusCode)
+                        ? yield backOff((0, utils_1.tryGetRetryAfterValueTimeInMilliseconds)(response.message.headers))
                         : yield backOff();
                 }
                 else {
                     core.error(`Unexpected response. Unable to upload chunk to ${resourceUrl}`);
-                    utils_1.displayHttpDiagnostics(response);
+                    (0, utils_1.displayHttpDiagnostics)(response);
                     return false;
                 }
             }
@@ -1758,14 +1811,14 @@ class UploadHttpClient {
      */
     patchArtifactSize(size, artifactName) {
         return __awaiter(this, void 0, void 0, function* () {
-            const resourceUrl = new url_1.URL(utils_1.getArtifactUrl());
+            const resourceUrl = new url_1.URL((0, utils_1.getArtifactUrl)());
             resourceUrl.searchParams.append('artifactName', artifactName);
             const parameters = { Size: size };
             const data = JSON.stringify(parameters, null, 2);
             core.debug(`URL is ${resourceUrl.toString()}`);
             // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
             const client = this.uploadHttpManager.getClient(0);
-            const headers = utils_1.getUploadHeaders('application/json', false);
+            const headers = (0, utils_1.getUploadHeaders)('application/json', false);
             // Extra information to display when a particular HTTP code is returned
             const customErrorMessages = new Map([
                 [
@@ -1774,7 +1827,7 @@ class UploadHttpClient {
                 ]
             ]);
             // TODO retry for all possible response codes, the artifact upload is pretty much complete so it at all costs we should try to finish this
-            const response = yield requestUtils_1.retryHttpClientRequest('Finalize artifact upload', () => __awaiter(this, void 0, void 0, function* () { return client.patch(resourceUrl.toString(), data, headers); }), customErrorMessages);
+            const response = yield (0, requestUtils_1.retryHttpClientRequest)('Finalize artifact upload', () => __awaiter(this, void 0, void 0, function* () { return client.patch(resourceUrl.toString(), data, headers); }), customErrorMessages);
             yield response.readBody();
             core.debug(`Artifact ${artifactName} has been successfully uploaded, total size in bytes: ${size}`);
         });
@@ -1792,7 +1845,11 @@ exports.UploadHttpClient = UploadHttpClient;
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
-    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
 }) : (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
@@ -1805,7 +1862,7 @@ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (
 var __importStar = (this && this.__importStar) || function (mod) {
     if (mod && mod.__esModule) return mod;
     var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
     __setModuleDefault(result, mod);
     return result;
 };
@@ -1827,12 +1884,12 @@ function getUploadSpecification(artifactName, rootDirectory, artifactFiles) {
     if (!fs.existsSync(rootDirectory)) {
         throw new Error(`Provided rootDirectory ${rootDirectory} does not exist`);
     }
-    if (!fs.lstatSync(rootDirectory).isDirectory()) {
+    if (!fs.statSync(rootDirectory).isDirectory()) {
         throw new Error(`Provided rootDirectory ${rootDirectory} is not a valid directory`);
     }
     // Normalize and resolve, this allows for either absolute or relative paths to be used
-    rootDirectory = path_1.normalize(rootDirectory);
-    rootDirectory = path_1.resolve(rootDirectory);
+    rootDirectory = (0, path_1.normalize)(rootDirectory);
+    rootDirectory = (0, path_1.resolve)(rootDirectory);
     /*
        Example to demonstrate behavior
@@ -1856,16 +1913,16 @@ function getUploadSpecification(artifactName, rootDirectory, artifactFiles) {
         if (!fs.existsSync(file)) {
             throw new Error(`File ${file} does not exist`);
         }
-        if (!fs.lstatSync(file).isDirectory()) {
+        if (!fs.statSync(file).isDirectory()) {
             // Normalize and resolve, this allows for either absolute or relative paths to be used
-            file = path_1.normalize(file);
-            file = path_1.resolve(file);
+            file = (0, path_1.normalize)(file);
+            file = (0, path_1.resolve)(file);
             if (!file.startsWith(rootDirectory)) {
                 throw new Error(`The rootDirectory: ${rootDirectory} is not a parent directory of the file: ${file}`);
             }
             // Check for forbidden characters in file paths that will be rejected during upload
             const uploadPath = file.replace(rootDirectory, '');
-            path_and_artifact_name_validation_1.checkArtifactFilePath(uploadPath);
+            (0, path_and_artifact_name_validation_1.checkArtifactFilePath)(uploadPath);
             /*
               uploadFilePath denotes where the file will be uploaded in the file container on the server. During a run, if multiple artifacts are uploaded, they will all
               be saved in the same container. The artifact name is used as the root directory in the container to separate and distinguish uploaded artifacts
@@ -1878,12 +1935,12 @@ function getUploadSpecification(artifactName, rootDirectory, artifactFiles) {
             */
             specifications.push({
                 absoluteFilePath: file,
-                uploadFilePath: path_1.join(artifactName, uploadPath)
+                uploadFilePath: (0, path_1.join)(artifactName, uploadPath)
             });
         }
         else {
             // Directories are rejected by the server during upload
-            core_1.debug(`Removing ${file} from rawSearchResults because it is a directory`);
+            (0, core_1.debug)(`Removing ${file} from rawSearchResults because it is a directory`);
         }
     }
     return specifications;
@@ -1928,10 +1985,10 @@ function getExponentialRetryTimeInMilliseconds(retryCount) {
         throw new Error('RetryCount should not be negative');
     }
     else if (retryCount === 0) {
-        return config_variables_1.getInitialRetryIntervalInMilliseconds();
+        return (0, config_variables_1.getInitialRetryIntervalInMilliseconds)();
     }
-    const minTime = config_variables_1.getInitialRetryIntervalInMilliseconds() * config_variables_1.getRetryMultiplier() * retryCount;
-    const maxTime = minTime * config_variables_1.getRetryMultiplier();
+    const minTime = (0, config_variables_1.getInitialRetryIntervalInMilliseconds)() * (0, config_variables_1.getRetryMultiplier)() * retryCount;
+    const maxTime = minTime * (0, config_variables_1.getRetryMultiplier)();
     // returns a random number between the minTime (inclusive) and the maxTime (exclusive)
     return Math.trunc(Math.random() * (maxTime - minTime) + minTime);
 }
@@ -1999,13 +2056,13 @@ function tryGetRetryAfterValueTimeInMilliseconds(headers) {
     if (headers['retry-after']) {
         const retryTime = Number(headers['retry-after']);
         if (!isNaN(retryTime)) {
-            core_1.info(`Retry-After header is present with a value of ${retryTime}`);
+            (0, core_1.info)(`Retry-After header is present with a value of ${retryTime}`);
             return retryTime * 1000;
         }
-        core_1.info(`Returned retry-after header value: ${retryTime} is non-numeric and cannot be used`);
+        (0, core_1.info)(`Returned retry-after header value: ${retryTime} is non-numeric and cannot be used`);
         return undefined;
     }
-    core_1.info(`No retry-after header was found. Dumping all headers for diagnostic purposes`);
+    (0, core_1.info)(`No retry-after header was found. Dumping all headers for diagnostic purposes`);
     // eslint-disable-next-line no-console
     console.log(headers);
     return undefined;
@@ -2089,13 +2146,13 @@ function getUploadHeaders(contentType, isKeepAlive, isGzip, uncompressedLength,
 exports.getUploadHeaders = getUploadHeaders;
 function createHttpClient(userAgent) {
     return new http_client_1.HttpClient(userAgent, [
-        new auth_1.BearerCredentialHandler(config_variables_1.getRuntimeToken())
+        new auth_1.BearerCredentialHandler((0, config_variables_1.getRuntimeToken)())
     ]);
 }
 exports.createHttpClient = createHttpClient;
 function getArtifactUrl() {
-    const artifactUrl = `${config_variables_1.getRuntimeUrl()}_apis/pipelines/workflows/${config_variables_1.getWorkFlowRunId()}/artifacts?api-version=${getApiVersion()}`;
-    core_1.debug(`Artifact Url: ${artifactUrl}`);
+    const artifactUrl = `${(0, config_variables_1.getRuntimeUrl)()}_apis/pipelines/workflows/${(0, config_variables_1.getWorkFlowRunId)()}/artifacts?api-version=${getApiVersion()}`;
+    (0, core_1.debug)(`Artifact Url: ${artifactUrl}`);
     return artifactUrl;
 }
 exports.getArtifactUrl = getArtifactUrl;
@@ -2109,7 +2166,7 @@ exports.getArtifactUrl = getArtifactUrl;
  * Other information such as the headers, the response code and message might be useful, so this is displayed.
  */
 function displayHttpDiagnostics(response) {
-    core_1.info(`##### Begin Diagnostic HTTP information #####
+    (0, core_1.info)(`##### Begin Diagnostic HTTP information #####
 Status Code: ${response.message.statusCode}
 Status Message: ${response.message.statusMessage}
 Header Information: ${JSON.stringify(response.message.headers, undefined, 2)}
@@ -2137,7 +2194,7 @@ exports.createEmptyFilesForArtifact = createEmptyFilesForArtifact;
 function getFileSize(filePath) {
     return __awaiter(this, void 0, void 0, function* () {
         const stats = yield fs_1.promises.stat(filePath);
-        core_1.debug(`${filePath} size:(${stats.size}) blksize:(${stats.blksize}) blocks:(${stats.blocks})`);
+        (0, core_1.debug)(`${filePath} size:(${stats.size}) blksize:(${stats.blksize}) blocks:(${stats.blocks})`);
         return stats.size;
     });
 }
@@ -2156,7 +2213,7 @@ function getProperRetention(retentionInput, retentionSetting) {
     if (retentionSetting) {
         const maxRetention = parseInt(retentionSetting);
         if (!isNaN(maxRetention) && maxRetention < retention) {
-            core_1.warning(`Retention days is greater than the max value allowed by the repository setting, reduce retention to ${maxRetention} days`);
+            (0, core_1.warning)(`Retention days is greater than the max value allowed by the repository setting, reduce retention to ${maxRetention} days`);
             retention = maxRetention;
         }
     }