In Linux I am able to sync files like this:
https://serverfault.com/questions/682708/copy-directory-structure-intact-to-aws-s3-bucket
Now, on Windows using C#, this is how I upload a file:
using (var fileTransferUtility = new TransferUtility(_credentials.AccessKeyId, _credentials.SecretAccessKey, _region))
{
    using (FileStream fileToUpload = new FileStream(fileLocation, FileMode.Open, FileAccess.Read))
    {
        var fileTransferUtilityRequest = new TransferUtilityUploadRequest
        {
            BucketName = bucketName,
            InputStream = fileToUpload,
            StorageClass = S3StorageClass.ReducedRedundancy,
            Key = key,
            CannedACL = S3CannedACL.PublicRead,
        };
        fileTransferUtility.Upload(fileTransferUtilityRequest);
    }
}
How can I sync a directory instead of just uploading a file using C#?
Here is how you upload a directory to S3 using C#:
try
{
    TransferUtilityUploadDirectoryRequest request = new TransferUtilityUploadDirectoryRequest
    {
        BucketName = bucket,
        Directory = uploadDirectory,
        SearchOption = System.IO.SearchOption.AllDirectories,
        CannedACL = S3CannedACL.PublicRead
    };
    _transferUtility.UploadDirectory(request);
    return true;
}
catch (Exception exception)
{
    // Log exception
    return false;
}
More examples can be found in this blog post:
https://www.samuelnmensah.com/blog/upload-delete-entire-directory-amazon-s3-using-transfer-utility/
Cached version of the link from Google:
http://webcache.googleusercontent.com/search?q=cache:b1CN7MxwkLwJ:samuelnmensah.com/uploading-and-deleting-an-entire-directory-to-aws-s3-using-transfer-utility/+&cd=1&hl=en&ct=clnk&gl=us
Hope it helps.
I'm trying to write a Lambda function that zips all the S3 objects in the Download folder into a single zip file and then moves that zip file to the BatchDownload folder in the same S3 bucket.
ListObjectsRequest downloadS3Object = new ListObjectsRequest
{
    BucketName = sample,
    Prefix = download
};
ListObjectsResponse downloadResponse = s3Client.ListObjectsAsync(downloadS3Object).Result;
List<string> downloadS3ObjectKeys = downloadResponse.S3Objects
    .Where(x => !string.IsNullOrEmpty(Path.GetFileName(x.Key)))
    .Select(s3Object => s3Object.Key)
    .ToList();

foreach (string downloadS3ObjectKey in downloadS3ObjectKeys)
{
    ListObjectsRequest checkBatchDownload = new ListObjectsRequest
    {
        BucketName = sample,
        Prefix = batchDownload
    };
    ListObjectsResponse s3ObjectResponse = s3Client.ListObjectsAsync(checkBatchDownload).Result;
    bool IsArchived = false;
    if (s3ObjectResponse.S3Objects.Count <= 0)
    {
        PutObjectRequest createBatchFolder = new PutObjectRequest()
        {
            BucketName = sample,
            Key = batchDownload
        };
        s3Client.PutObjectAsync(createBatchFolder);
    }
In the above code I'm getting all the objects from the Download folder and then looping through each of the object keys. I don't understand how to zip all the objects into a single zip file. Is there a better way to do this without getting the object keys separately?
Can you please help with the code to zip all the objects of the Download folder into one zip file and move that file to a new folder.
I'm not sure why you call ListObjects again inside the loop, or why you re-upload the same objects, but it doesn't seem right.
It seems you want to download all your objects, place them in a zip archive, and re-upload that archive.
So you need something like the following:
var downloadS3Object = new ListObjectsRequest
{
    BucketName = sample,
    Prefix = download
};

var downloadResponse = await s3Client.ListObjectsAsync(downloadS3Object);
List<string> downloadS3ObjectKeys = downloadResponse.S3Objects
    .Where(x => !string.IsNullOrEmpty(Path.GetFileName(x.Key)))
    .Select(s3Object => s3Object.Key)
    .ToList();

var stream = new MemoryStream();
using (var zip = new ZipArchive(stream, ZipArchiveMode.Update, true))
{
    foreach (string downloadS3ObjectKey in downloadS3ObjectKeys)
    {
        var getObject = new GetObjectRequest
        {
            BucketName = sample,
            Key = downloadS3ObjectKey,
        };

        var entry = zip.CreateEntry(downloadS3ObjectKey);
        using (var zipStream = entry.Open())
        using (var objectResponse = await s3Client.GetObjectAsync(getObject))
        using (var objectStream = objectResponse.ResponseStream)
        {
            // copy the object's content into the zip entry, not into the archive object itself
            await objectStream.CopyToAsync(zipStream);
        }
    }
}

stream.Position = 0; // reset the MemoryStream to the beginning

var createBatchFolder = new PutObjectRequest()
{
    BucketName = sample,
    Key = batchDownload,
    InputStream = stream,
};
await s3Client.PutObjectAsync(createBatchFolder);
Note the use of using to dispose the response and streams, and do not use .Result, as it may deadlock; use await instead.
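To illustrate the .Result point, here is a minimal sketch of the listing step written as a fully async method (the method name and parameters are illustrative, not taken from the original code):

// Blocking with .Result can deadlock when a synchronization context is present:
// var response = s3Client.ListObjectsAsync(request).Result;   // avoid

// Prefer making the calling method async and awaiting the task:
private static async Task<List<string>> ListDownloadKeysAsync(
    IAmazonS3 s3Client, string bucketName, string prefix)
{
    var response = await s3Client.ListObjectsAsync(new ListObjectsRequest
    {
        BucketName = bucketName,
        Prefix = prefix
    });

    // Keep only keys that point at actual objects, not the folder placeholder itself
    return response.S3Objects
        .Where(o => !string.IsNullOrEmpty(Path.GetFileName(o.Key)))
        .Select(o => o.Key)
        .ToList();
}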
I'm playing with Azure Blob Storage and I'm wondering if I can optimize this code a bit.
As you can see, I'm creating a folder, uploading the content to that folder before sending it to Azure Storage, and deleting the folder on completion, which seems a bit redundant to be fair.
Now, I'm wondering if I can skip these steps and just upload the stream to Azure without copying it to the root folder first?
Here is the code:
public async Task UploadBlobFile(IFormFile file, BlobMetadata metadata, string containerName)
{
    var containerClient = _blobServiceClient.GetBlobContainerClient(containerName);
    BlobHttpHeaders headers = new BlobHttpHeaders
    {
        ContentType = "application/pdf",
        ContentLanguage = "hr-HR",
    };
    if (file.Length > 0)
    {
        try
        {
            // create the folder if it doesn't exist
            var rootFolder = Path.Combine(_hostingEnvironment.WebRootPath, "upload");
            if (!Directory.Exists(rootFolder))
            {
                Directory.CreateDirectory(rootFolder);
            }
            var filePath = Path.Combine(rootFolder, file.FileName);
            using (var stream = new FileStream(filePath, FileMode.Create))
            {
                await file.CopyToAsync(stream);
                // set cursor to the beginning of the stream
                stream.Position = 0;
                var metadataProperties = new Dictionary<string, string>
                {
                    { MetadataValues.Id, metadata.Id },
                    { MetadataValues.Name, metadata.Name },
                    { MetadataValues.UniqueName, metadata.UniqueName }
                };
                var blobClient = containerClient.GetBlobClient(file.FileName);
                await blobClient.UploadAsync(stream, headers, metadataProperties);
            }
            if (File.Exists(filePath))
            {
                File.Delete(filePath);
            }
        }
        catch (Exception ex)
        {
            throw ex;
        }
    }
}
As fellow users Bradley Uffner and pinkfloydx33 pointed out, the code above can be optimized like this:
public async Task UploadBlobFile(IFormFile file, BlobMetadata metadata, string containerName)
{
    var containerClient = _blobServiceClient.GetBlobContainerClient(containerName);
    BlobHttpHeaders headers = new BlobHttpHeaders
    {
        ContentType = "application/pdf",
        ContentLanguage = "hr-HR",
    };
    if (file.Length > 0)
    {
        using (var stream = file.OpenReadStream())
        {
            // set cursor to the beginning of the stream
            stream.Position = 0;
            var metadataProperties = new Dictionary<string, string>
            {
                { MetadataValues.Id, metadata.Id },
                { MetadataValues.Name, metadata.Name },
                { MetadataValues.UniqueName, metadata.UniqueName }
            };
            var blobClient = containerClient.GetBlobClient(file.FileName);
            await blobClient.UploadAsync(stream, headers, metadataProperties);
        }
    }
}
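As a side note, and assuming a recent Azure.Storage.Blobs v12 package, the same upload can also be expressed with the BlobUploadOptions overload, which groups the headers and metadata into a single options object (the variables are the ones from the method above):

using (var stream = file.OpenReadStream())
{
    var options = new BlobUploadOptions
    {
        HttpHeaders = headers,
        Metadata = metadataProperties
    };

    var blobClient = containerClient.GetBlobClient(file.FileName);
    await blobClient.UploadAsync(stream, options);
}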
I have created a Web API project in C# using AWS Lambda.
The API is used to upload media files to an S3 bucket, with an SNS notification when the upload completes.
I tried the code on localhost and everything worked: the files were uploaded to the S3 bucket and were readable.
In production, the Lambda only works with PDF or TXT files; PNG, JPEG, ZIP, etc. files come out broken. I've noticed that the size of PNG, JPEG, and ZIP files is doubled. I don't know why.
public UploadResponse UploadFile(UploadRequest request)
{
    AmazonS3Client client = new AmazonS3Client(Settings.GetAppKey(), Settings.GetSecretKey(),
        RegionEndpoint.GetBySystemName(request.RegionS3));

    if (request.File.Length > 0)
    {
        if (string.IsNullOrEmpty(request.FileName))
        {
            request.FileName = Guid.NewGuid().ToString();
        }

        MemoryStream inputFile = new MemoryStream();
        using (inputFile)
        {
            request.File.CopyTo(inputFile);
            var d = request.File.ContentType;
            var s = request.File.Length;

            var uploadRequest = new TransferUtilityUploadRequest
            {
                InputStream = inputFile,
                ContentType = request.File.ContentType,
                Key = request.FileName,
                BucketName = request.BucketName,
                CannedACL = S3CannedACL.PublicRead
            };

            TransferUtility fileTransferToS3 = new TransferUtility(client);
            fileTransferToS3.UploadAsync(uploadRequest).Wait();
        }
    }
    return UploadResponse.CreateUploadResponse(request.File.ContentType, request.FileName);
}
protected override void Init(IWebHostBuilder builder)
{
    RegisterResponseContentEncodingForContentType("multipart/form-data", ResponseContentEncoding.Base64);
    builder
        .UseStartup<Startup>();
}
I am using the SSH.NET library to download a file from an SFTP server. When I give it the full file name it works, but I want to download a file by prefix; in that folder the prefix is POS_ETH_SE7*. There will always be exactly one file. After I download it, I move it to another folder. Here is my method:
var auth = new PasswordAuthenticationMethod(username, password);
var connectionInfo = new ConnectionInfo(ipAddress, port, auth);

// Download file
using (var sftp = new SftpClient(connectionInfo))
{
    string pathLocalFile =
        Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.Desktop),
            "POS_ETH_SE7.ics");

    sftp.Connect();
    Console.WriteLine("Downloading {0}", remoteFilePath);

    using (Stream fileStream = File.OpenWrite(pathLocalFile))
    using (StreamWriter writer = new StreamWriter(fileStream))
    {
        try
        {
            sftp.DownloadFile(remoteFilePath, fileStream);
        }
        catch (SftpPathNotFoundException ex)
        {
        }
    }

    try
    {
        var inFile = sftp.Get(remoteFilePath);
        inFile.MoveTo(remoteMoveFileToPath + "/POS_ETH_SE7.xml");
    }
    catch (SftpPathNotFoundException ex)
    {
        Console.WriteLine("\nnothing to update...\n");
    }

    sftp.Disconnect();
}
Start with the code from the following question and add the additional constraint on the file name prefix.
Downloading a directory using SSH.NET SFTP in C#
const string prefix = "POS_ETH_SE7";

IEnumerable<SftpFile> files = client.ListDirectory(remotePath);
files = files.Where(file => file.Name.StartsWith(prefix));

foreach (SftpFile file in files)
{
    string pathLocalFile = Path.Combine(localPath, file.Name);
    using (var stream = File.Create(pathLocalFile))
    {
        client.DownloadFile(file.FullName, stream);
    }

    // If you want to archive the downloaded files:
    string archivePath = remoteMoveFileToPath + "/" + file.Name;
    client.RenameFile(file.FullName, archivePath);
}
Or use a more powerful SFTP library. For example, with my WinSCP .NET assembly, you can do the same with a single call to Session.GetFilesToDirectory:
session.GetFilesToDirectory(remotePath, localPath, prefix + "*").Check();
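For context, a rough sketch of the surrounding WinSCP session setup might look like the following (host name, credentials, and fingerprint are placeholders you would replace with your own values):

using WinSCP;

var sessionOptions = new SessionOptions
{
    Protocol = Protocol.Sftp,
    HostName = "sftp.example.com",
    UserName = "username",
    Password = "password",
    SshHostKeyFingerprint = "ssh-rsa 2048 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
};

using (var session = new Session())
{
    session.Open(sessionOptions);

    // Download only files matching the prefix; Check() throws if any transfer failed
    session.GetFilesToDirectory(remotePath, localPath, prefix + "*").Check();
}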
using (var sftp = new SftpClient(connectionInfo))
{
    sftp.Connect();

    IEnumerable<SftpFile> files = sftp.ListDirectory(configSftpClient.remoteFilePath);
    files = files.Where(file => file.Name.StartsWith(configSftpClient.filePrefix));

    foreach (SftpFile file in files)
    {
        string pathLocalFile = Path.Combine(configSftpClient.localFilePath, file.Name);
        try
        {
            using (var stream = File.Create(pathLocalFile))
            {
                sftp.DownloadFile(file.FullName, stream);
                var movableFile = sftp.Get(file.FullName);
                Console.WriteLine(file.FullName);
                movableFile.MoveTo(configSftpClient.remoteMoveFileToPath + "/" + file.Name);
                stream.Close();
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine("file used by other");
        }
    }
}
I am trying to upload files to an S3 bucket, but I don't want the file to come from my local machine; instead, when someone uses the application and uploads a file, it should go directly to my S3 bucket. Is there a way to do this? (The code should be in .NET.)
string filekey = filePath.Substring(filePath.LastIndexOf('\\') + 1);
using (MemoryStream filebuffer = new MemoryStream(File.ReadAllBytes(filePath)))
{
    PutObjectRequest putRequest = new PutObjectRequest
    {
        BucketName = this.awsBucketName,
        Key = "GUARD1" + "/" + filekey,
        InputStream = filebuffer,
        ContentType = "application/pkcs8",
    };
This is what I am doing, which creates a folder in the bucket, takes the file from a path on the local machine, and uploads it to the bucket.
What I need is for the file not to be saved on the local machine at all and instead be taken directly from the application to the S3 bucket.
This is the WriteIntoS3 method:
string filekey = filePath.Substring(filePath.LastIndexOf('\\') + 1);
using (MemoryStream filebuffer = new MemoryStream(File.ReadAllBytes(filePath)))
{
    PutObjectRequest putRequest = new PutObjectRequest
    {
        BucketName = this.awsBucketName,
        Key = "GUARD1" + "/" + filekey,
        InputStream = filebuffer,
        ContentType = "application/pkcs8",
    };
    client.PutObject(putRequest);

    GetPreSignedUrlRequest expiryUrlRequest = new GetPreSignedUrlRequest();
    expiryUrlRequest.BucketName = this.awsBucketName;
    expiryUrlRequest.Key = filekey;
    expiryUrlRequest.Expires = DateTime.Now.AddDays(ExpiryDays);

    string url = client.GetPreSignedURL(expiryUrlRequest);
    return url;
}
If you don't want to use local files, you can use the TransferUtility class to upload a stream directly to S3.
For example:
using Amazon.S3.Transfer;
using System.IO;

class Program
{
    static void Main(string[] args)
    {
        var client = new Amazon.S3.AmazonS3Client();

        // Load the data into the MemoryStream from a data source other than a file
        using (var ms = new MemoryStream())
        {
            using (var transferUtility = new TransferUtility(client))
            {
                transferUtility.Upload(ms, "bucket", "key");
            }
        }
    }
}
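If the data comes from an ASP.NET request rather than a MemoryStream, a minimal sketch along the same lines could stream the uploaded IFormFile straight to S3 without touching the local disk (the method name, bucket handling, and the "GUARD1" key prefix are illustrative):

public async Task<string> UploadToS3Async(IFormFile file, string bucketName)
{
    var client = new Amazon.S3.AmazonS3Client();

    using (var transferUtility = new TransferUtility(client))
    using (var stream = file.OpenReadStream())
    {
        // Upload the request stream directly; nothing is written to local storage
        await transferUtility.UploadAsync(stream, bucketName, "GUARD1/" + file.FileName);
    }

    return file.FileName;
}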