How do I separate my code into their own classes and still have it function the same? This is currently what my code looks like.
using System;
using System.Collections.Generic;
using System.Xml;
using XCENT.JobServer.JobPlugIn;
using System.IO;
using HPD.API.Utility.DataAccess;
namespace DataPurge
{
public class Purge : IJob, IJobControl {
    /// <summary>Exposes this instance as its own job-control handle.</summary>
    public IJobControl JobControl { get { return ( this ); } }

    /// <summary>At most one instance of this job may run at a time.</summary>
    public int MaxInstanceCount { get { return 1; } }

    /// <summary>Display name registered with the job server.</summary>
    public string Name { get { return "DataPurge"; } }

    public Purge() { }

    /// <summary>
    /// Job entry point: loads the purge configuration rows and runs the matching
    /// purger (file or database) for each of them. Exceptions propagate to the
    /// job server, exactly as before.
    /// </summary>
    /// <param name="XmlFragment">Raw XML parameter fragment from the job server.</param>
    public void Run( string XmlFragment ) {
        // NOTE(review): the parsed node is not used by the loop below; the call is
        // kept so any validation inside the helper still runs.
        XmlNode xmlNode = Common.ConstructXmlNodeFromString(XmlFragment, "Params");

        // FIX: removed the try { ... } catch (Exception ex) { throw; } wrapper -
        // it only rethrew (leaving 'ex' unused) - and the no-op
        // "purge = null; factory = null;" assignments.
        foreach (var item in DataList())
        {
            IPurger purger = new PurgerFactory(item).Purger;
            purger.Purge();
        }
    }

    /// <summary>Common contract for every purger implementation.</summary>
    public interface IPurger
    {
        void Purge();
    }

    /// <summary>Kind of purge a configuration row describes.</summary>
    public enum PurgeType
    {
        File,
        Database,
    }

    /// <summary>Deletes old files from the folder named in the parameters.</summary>
    public class FilePurger : IPurger
    {
        private Parameters parameter;

        public FilePurger(Parameters parameter)
        {
            this.parameter = parameter;
        }

        /// <summary>Deletes every file in FilePath created more than seven days ago.</summary>
        public void Purge()
        {
            // NOTE(review): the 7-day cutoff is hard-coded even though the
            // parameters carry OlderThanDays (which DbPurger uses) - confirm
            // whether this should honor parameter.OlderThanDays instead.
            var files = new DirectoryInfo(parameter.FilePath).GetFiles();
            foreach (var file in files)
            {
                if (DateTime.Now - file.CreationTime > TimeSpan.FromDays(7))
                {
                    File.Delete(file.FullName);
                }
            }
        }
    }

    /// <summary>Runs the configured cleanup stored procedure against the database.</summary>
    public class DbPurger : IPurger
    {
        private Parameters parameter;

        public DbPurger(Parameters parameter)
        {
            this.parameter = parameter;
        }

        public void Purge()
        {
            var access = new SqlDataAccess();
            var sqlParams = new Dictionary<string, object>();
            // FIX: the parameter prefix was mangled to '#' in transcription;
            // SQL Server parameters use '@'.
            sqlParams.Add("@OlderThanDays", parameter.OlderThanDays);
            access.ExecuteNonQuery(parameter.CString, parameter.SPName, sqlParams, 30, false);
        }
    }

    /// <summary>Loads the purge configuration rows from the database.</summary>
    private List<Parameters> DataList()
    {
        // SECURITY(review): credentials are hard-coded in the connection string;
        // move them to protected configuration.
        var sqlParams = new SqlDataAccess();
        var list = sqlParams.GetDataTableAsList<Parameters>("Data Source = MYSERVER; Initial Catalog = MYDATABASE; User ID = UID; Password = PASSWORD;", "purge.spoDataTable", null);
        return list;
    }

    /// <summary>Builds the purger that matches a configuration row's PurgeType string.</summary>
    public class PurgerFactory
    {
        public IPurger Purger { get; set; }

        public PurgerFactory(Parameters parameter)
        {
            PurgeType type = (PurgeType)Enum.Parse(typeof(PurgeType), parameter.PurgeType);
            switch (type)
            {
                case PurgeType.File:
                    Purger = new FilePurger(parameter);
                    break;
                case PurgeType.Database:
                    Purger = new DbPurger(parameter);
                    break;
                default:
                    throw new NotImplementedException();
            }
        }
    }

    /// <summary>
    /// Used to submit a job via the job monitor
    /// </summary>
    public XmlNode JobXMLNode => Common.ConstructXmlNodeFromString("" +
        "<JobParams>" +
        " <Param Name=\"InfrastructureAPI\" DataType=\"String\">" +
        " <Description>Infrastructure API URL.</Description>" +
        " </Param>" +
        " <Param Name=\"EnvironmentName\" DataType=\"String\">" +
        " <Description>The current environment.</Description>" +
        " </Param>" +
        "</JobParams>",
        "JobParams");
}
}
Currently all parts of the program are stuffed into this one single class. I want to separate them out into their own separate classes to make the code much cleaner but still have it function the same. I'm still a beginner coder and don't know the first place to start. Any help would be much appreciated!
You should create a file IPurger.cs for the interface IPurger, then a file FilePurger.cs for the class FilePurger, the file DbPurger.cs for the class DbPurger and lastly PurgerFactory.cs for the class PurgerFactory.
That should clean up your code quite well.
If that enum is used from multiple places, you may want to place it in its own class too, perhaps a generic Enums.cs.
Related
My use case:
In a single threaded application, I need to serialize arbitrary classes for logging purposes.
The arbitrary classes are predominantly translated in an automated way from a massive VB6 application into .NET.
If serialized without a timeout, the serialization method will loop until it runs out of memory.
This is what I have currently:
// Dedicated worker that performs JSON serialization on its own thread so the
// caller can abandon (time out) a serialization that never finishes.
// Handshake protocol: the caller stores a delegate in toExecute, signals
// messageToReceiver, and waits on messageToSender; the worker runs the delegate,
// stores the outcome in result, then signals back.
internal class Serializer
{
    private readonly log4net.ILog log = log4net.LogManager.GetLogger(System.Reflection.MethodBase.GetCurrentMethod().DeclaringType);

    // Outcome of the last serialization; volatile because it is handed between threads.
    public volatile string result = null;

    // Work item for the worker thread; null means "nothing to do".
    public volatile Func<string> toExecute = null;

    // The long-lived worker thread (started in the constructor).
    public Thread thread;

    // Signaled by the worker when a result is ready.
    public ManualResetEventSlim messageToSender = new ManualResetEventSlim(false);

    // Signaled by the caller when new work is available.
    public ManualResetEventSlim messageToReceiver = new ManualResetEventSlim(false);

    public Serializer()
    {
        thread = new Thread(new ThreadStart(run));
        thread.Start();
    }

    // NOTE(review): disposing managed objects from a finalizer is unreliable -
    // finalization order is not guaranteed; implementing IDisposable would be
    // the conventional fix. Kept as-is to preserve behavior.
    ~Serializer()
    {
        try
        {
            if (messageToSender != null) messageToSender.Dispose();
        }
        catch { };
        try
        {
            if (messageToReceiver != null) messageToReceiver.Dispose();
        }
        catch { };
    }

    // Set to true (plus a thread.Interrupt) to shut the worker down.
    public volatile bool ending = false;

    // Worker loop: run the pending delegate (if any), publish the result,
    // then block until the caller posts more work.
    public void run()
    {
        while (!ending)
        {
            try
            {
                if (toExecute != null)
                {
                    result = toExecute();
                }
                messageToReceiver.Reset();
                messageToSender.Set();
                messageToReceiver.Wait();
            }
            catch (ThreadInterruptedException)
            {
                // Thrown out of Wait() when LogStucturedEnd interrupts us: exit cleanly.
                log.Warn("Serialization interrupted");
                break;
            }
            catch (ThreadAbortException)
            {
                // The caller aborted a runaway serialization; cancel the abort
                // and keep the worker alive for the next request.
                Thread.ResetAbort();
                result = null;
            }
            catch (Exception ex)
            {
                log.Error("Error in Serialization", ex);
                Console.WriteLine(ex);
                break;
            }
        }
    }
}
// First-cut structured logger: delegates JSON serialization to the Serializer
// worker thread so a runaway serialization can be timed out (via Thread.Abort).
public class LocalStructuredLogging
{
    private static volatile Serializer _serializer;

    // Lazily creates the shared Serializer.
    // NOTE(review): this lazy init is not thread-safe (no lock / double check);
    // acceptable only because the surrounding application is single threaded.
    private static Serializer serializer
    {
        get
        {
            if (_serializer == null)
            {
                _serializer = new Serializer();
            }
            return _serializer;
        }
    }

    // Shuts the worker thread down: sets the ending flag and interrupts its Wait().
    public void LogStucturedEnd()
    {
        try
        {
            if (serializer != null)
            {
                serializer.ending = true;
                serializer.thread.Interrupt();
            }
        }
        catch { }
    }

    // Call sites whose payloads previously failed or timed out; serialization for
    // them is skipped from then on. Key is a hash of type name and caller line.
    internal ConcurrentDictionary<long, bool> disallowedToSerialize = new ConcurrentDictionary<long, bool>();

    // Serializes payload to JSON on the worker thread with a 6-second budget.
    // Sentinel returns: "°,°" = blacklisted call site, "°§°" = timed out,
    // "°-°" = handshake error ("°°°" is produced when serialization itself threw).
    public string TrySerialize<T>(T payload, [CallerLineNumber] int line = 0)
    {
        // NOTE(review): name-hash * line is collision-prone across types/files;
        // the author addresses this in the later revision.
        long hashEl = typeof(T).Name.GetHashCode() * line;
        bool dummy;
        unchecked
        {
            if (disallowedToSerialize.TryGetValue(hashEl, out dummy))
            {
                return "°,°";
            }
        }
        serializer.toExecute = () =>
        {
            try
            {
                return Newtonsoft.Json.JsonConvert.SerializeObject(payload, new Newtonsoft.Json.JsonSerializerSettings() { ReferenceLoopHandling = Newtonsoft.Json.ReferenceLoopHandling.Ignore });
            }
            catch (Exception)
            {
                disallowedToSerialize.TryAdd(hashEl, false);
                return "°°°";
            }
        };
        try
        {
            // Hand the work item to the worker and wait up to 6 seconds for it.
            serializer.messageToSender.Reset();
            serializer.messageToReceiver.Set();
            if (serializer.messageToSender.Wait(6000))
            {
                return Interlocked.Exchange(ref serializer.result, null);
            }
            // Timed out: abort the worker's current serialization and blacklist the site.
            serializer.toExecute = null;
            serializer.thread.Abort();
            serializer.messageToSender.Wait(2000);
            disallowedToSerialize.TryAdd(hashEl, false);
            return "°§°";
        }
        catch (Exception)
        {
            disallowedToSerialize.TryAdd(hashEl, false);
            return "°-°";
        }
    }
}
The code is called as in the following (test is an arbitrary class instance):
var logger = new LocalStructuredLogging();
var rr5 = logger.TrySerialize(test);
Although it seems to do the job, there are some issues with it:
it has a dependency on Thread.Abort
it is time dependent, so it will thus produce varied results on a loaded system
every class instance is treated like every other class instance - no tweaking
...
So, are there any better solutions available ?
Based upon dbc's excellent answer, I managed to create a better timed serializer.
It resolves all 3 issues mentioned above:
/// <summary>
/// JsonTextWriter that aborts serialization (by throwing JsonSerializationException)
/// when the object graph exceeds a maximum nesting depth or a wall-clock time budget.
/// </summary>
public class TimedJsonTextWriter : JsonTextWriter
{
    /// <summary>Maximum nesting depth allowed; null disables the depth check.</summary>
    public int? MaxDepth { get; set; }

    /// <summary>Time budget for the whole serialization; null disables the time check.</summary>
    public TimeSpan? MaxTimeUsed { get; set; }

    /// <summary>Deepest nesting level observed so far (diagnostic).</summary>
    public int MaxObservedDepth { get; private set; }

    // FIX: was "private DateTime start = DateTime.Now" - wall-clock time can jump
    // (DST, NTP adjustment); Stopwatch is monotonic and intended for elapsed time.
    private readonly System.Diagnostics.Stopwatch elapsed = System.Diagnostics.Stopwatch.StartNew();

    public TimedJsonTextWriter(TextWriter writer, JsonSerializerSettings settings, TimeSpan? maxTimeUsed)
        : base(writer)
    {
        this.MaxDepth = (settings == null ? null : settings.MaxDepth);
        this.MaxObservedDepth = 0;
        this.MaxTimeUsed = maxTimeUsed;
    }

    public TimedJsonTextWriter(TextWriter writer, TimeSpan? maxTimeUsed, int? maxDepth = null)
        : base(writer)
    {
        this.MaxDepth = maxDepth;
        this.MaxTimeUsed = maxTimeUsed;
    }

    public override void WriteStartArray()
    {
        base.WriteStartArray();
        CheckDepth();
    }

    public override void WriteStartConstructor(string name)
    {
        base.WriteStartConstructor(name);
        CheckDepth();
    }

    public override void WriteStartObject()
    {
        base.WriteStartObject();
        CheckDepth();
    }

    // Counter so the time check runs only every 1024th container start.
    uint checkDepthCounter = 0;

    // Enforces the depth limit on every call and the time limit every 1024 calls.
    // Both comparisons are lifted over the nullables, so a null limit never fires.
    private void CheckDepth()
    {
        MaxObservedDepth = Math.Max(MaxObservedDepth, Top);
        if (Top > MaxDepth)
            throw new JsonSerializationException($"Depth {Top} Exceeds MaxDepth {MaxDepth} at path \"{Path}\"");
        unchecked
        {
            if ((++checkDepthCounter & 0x3ff) == 0 && elapsed.Elapsed > MaxTimeUsed)
                throw new JsonSerializationException($"Time Usage Exceeded at path \"{Path}\"");
        }
    }
}
// Second revision: no worker thread or Thread.Abort - the time and depth budget
// is enforced inside TimedJsonTextWriter while the serializer walks the graph.
public class LocalStructuredLogging
{
    // Nothing to tear down any more (no background thread); kept for API compatibility.
    public void LogStucturedEnd()
    {
    }

    // Call sites whose payloads previously failed; skipped on later calls.
    // NOTE(review): HashSet is not thread-safe, unlike the ConcurrentDictionary
    // used in the earlier revision - fine only while logging stays single threaded.
    internal HashSet<long> disallowedToSerialize = new HashSet<long>();

    // Serializes payload to JSON, bounded by maxDepth and secondsToTimeout.
    // Sentinel returns: "°,°" when the call site is blacklisted, "°-°" on any
    // failure (depth exceeded, timeout, or serializer exception).
    public string TrySerialize<T>(T payload, int maxDepth = 100, int secondsToTimeout = 2, [CallerLineNumber] int line = 0)
    {
        // NOTE(review): name-hash * line can collide across types/files, as the
        // author acknowledges; a colliding site would be silently skipped too.
        long hashEl = typeof(T).Name.GetHashCode() * line;
        if (disallowedToSerialize.Contains(hashEl))
        {
            return "°,°";
        }
        try
        {
            var settings = new JsonSerializerSettings { MaxDepth = maxDepth, ReferenceLoopHandling = Newtonsoft.Json.ReferenceLoopHandling.Ignore };
            using (var writer = new StringWriter())
            {
                using (var jsonWriter = new TimedJsonTextWriter(writer, settings, new TimeSpan(0, 0, secondsToTimeout)))
                {
                    JsonSerializer.Create(settings).Serialize(jsonWriter, payload);
                    // Log the MaxObservedDepth here, if you want to.
                }
                return writer.ToString();
            }
        }
        catch (Exception)
        {
            disallowedToSerialize.Add(hashEl);
            return "°-°";
        }
    }
}
The only issue remaining are the Hash collisions, which are easy to solve (e.g. by using the source file name as well or use another type of Collection).
The correct way to run an action timed would be to do something like the following. I would recommend taking a second look at how serialization should work as well :).
/// <summary>
/// Run an action with a timeout.
/// </summary>
/// <param name="action">Action to execute.</param>
/// <param name="secondsTimout">Seconds before the task should cancel.</param>
/// <returns>A task that completes when the action has run.</returns>
/// <remarks>
/// FIX: the CancellationTokenSource is now disposed (it owns a timer once a
/// delay is scheduled). Note that the token passed to Task.Run only prevents the
/// action from *starting* after the timeout - it cannot abort an action that is
/// already running; the action itself must observe a token for true cancellation.
/// </remarks>
public static async Task RunTimeout(Action action, int secondsTimout) {
    // CTS(TimeSpan) schedules the cancellation, equivalent to CancelAfter().
    using (var tokenSource = new CancellationTokenSource(TimeSpan.FromSeconds(secondsTimout)))
    {
        await Task.Run(action, tokenSource.Token);
    }
}
You may also want to return a variable upon the completion of your timed task. That can be done like so...
/// <summary>
/// Run a value-returning delegate with a timeout and return its result.
/// </summary>
/// <param name="action">Delegate to execute.</param>
/// <param name="secondsTimout">Seconds before the task should cancel.</param>
/// <returns>The value produced by <paramref name="action"/>.</returns>
/// <remarks>
/// FIX: the CancellationTokenSource is now disposed. As with the void overload,
/// the token only prevents the delegate from starting after the timeout; it
/// cannot interrupt a delegate that is already executing.
/// </remarks>
public static async Task<T> RunTimeout<T>(Func<T> action, int secondsTimout) {
    using (var tokenSource = new CancellationTokenSource(TimeSpan.FromSeconds(secondsTimout)))
    {
        return await Task.Run(action, tokenSource.Token);
    }
}
I'm creating a program which will execute a command after user input.
Some commands I want to implement are: creating, reading a file, getting current working directory etc.
I created a dictionary which will store user input and corresponding command:
// Maps a typed-in command name to the handler that executes it. All handlers
// share the Action<string[]> signature; the string[] carries any arguments.
// NOTE(review): CreateFile is declared below as "string CreateFile(string path)",
// which does not match Action<string[]> - this initializer cannot compile until
// that signature is aligned (the issue the question itself raises).
public static Dictionary<string, Action<string[]>> Commands { get; set; } = new Dictionary<string, Action<string[]>>()
{
    {"pwd", PrintWorkingDirectory },
    {"create", CreateFile },
    {"print", ReadFile },
};
Unfortunately I have issues with triggering the method:
// Read-eval loop: prompts, reads one command per line, and executes it until
// the user types "exit".
// NOTE(review): "exit" itself is also passed to Execute (and reported as not
// found) before the loop terminates.
public void Run()
{
    Console.WriteLine("Welcome, type in command.");
    string input = null;
    do
    {
        Console.Write("> ");
        input = Console.ReadLine();
        Execute(input);
    } while (input != "exit");
}
/// <summary>
/// Looks up <paramref name="input"/> in <see cref="Commands"/> and invokes the
/// matching handler.
/// </summary>
/// <param name="input">The raw command name typed by the user.</param>
/// <returns>0 when a handler was found and invoked; 1 otherwise.</returns>
public int Execute(string input)
{
    // FIX: the previous version fetched Commands.Values.FirstOrDefault() (always
    // the first handler, and never invoked it) and then unconditionally printed
    // "not found". Look the handler up by key and invoke it instead.
    Action<string[]> action;
    if (Commands.TryGetValue(input, out action))
    {
        action(new string[0]);
        return 0;
    }
    Console.WriteLine($"{input} not found");
    return 1;
}
Also I noticed that this solution would not work with method which is not void, but returns something, as for example CreateFile.
/// <summary>
/// Prompts the user for a target path and one line of content, then creates the
/// file (overwriting any existing file) and writes the content into it.
/// </summary>
/// <param name="path">Returned unchanged; the actual target is read from the console.</param>
/// <returns>The original <paramref name="path"/> argument.</returns>
public static string CreateFile(string path)
{
    Console.WriteLine("Create a file");
    string userInput = Console.ReadLine();
    // First token is the command name; the remaining tokens form the path.
    string[] file = userInput.Split(new char[] { ' ' }).Skip(1).ToArray();
    string newPath = Path.GetFullPath(Path.Combine(file));
    // FIX: removed the redundant FileStream open/close (StreamWriter already
    // creates and truncates the file) and the try/catch that only rethrew.
    using (StreamWriter sw = new StreamWriter(newPath))
    {
        Console.WriteLine("Please type the content.Press Enter to save.");
        sw.WriteLine(Console.ReadLine());
        Console.WriteLine("File {0} has been created", newPath);
    }
    return path;
}
/// <summary>
/// Prompts the user for a path and echoes every line of that file to the console.
/// </summary>
/// <param name="args">Unused; present to satisfy the Action&lt;string[]&gt; signature.</param>
public static void ReadFile(string[] args)
{
    Console.WriteLine("Reading file");
    string userInput = Console.ReadLine();
    // Drop the leading command token; the remaining tokens form the path.
    var pathParts = userInput.Split(new char[] { ' ' }).Skip(1).ToArray();
    var fullPath = Path.GetFullPath(Path.Combine(pathParts));
    foreach (var line in File.ReadAllLines(fullPath))
    {
        Console.WriteLine(line);
    }
}
/// <summary>
/// Writes the process's current working directory to the console.
/// </summary>
/// <param name="args">Unused; present to satisfy the Action&lt;string[]&gt; signature.</param>
public static void PrintWorkingDirectory(string[] args)
{
    Console.WriteLine(Directory.GetCurrentDirectory());
}
Could somebody advise me how to deal with these issues?
Is it that this dictionary I created does not make much sense at all?
First problem: You're always fetching the first element of the dictionary and are not using the index operator to retrieve the correct value. Therefore change:
if(Commands.Keys.Contains(input))
{
var action = Commands.Values.FirstOrDefault(); //doesn't work, gives '{command} not found'
}
to:
/// <summary>
/// Executes the handler registered for <paramref name="input"/>.
/// </summary>
/// <returns>0 when the command was found and invoked; 1 when it was unknown.</returns>
public int Execute(string input)
{
    if (Commands.Keys.Contains(input))
    {
        // Index the dictionary by key to obtain the matching handler.
        var action = Commands[input];
        action?.Invoke(new string[] { });
        // FIX: report success - the previous version fell through and
        // returned 1 in every case (and still carried a stale
        // "doesn't work" comment from the broken original).
        return 0;
    }
    else
    {
        Console.WriteLine($"{input} not found");
    }
    return 1;
}
Regarding your second question about dictionary usage: I think it is fine to use a dictionary to map different commands based on a given key. The alternative would be switch or if constructs, which can be avoided in object-oriented programming.
Regarding your question about string CreateFile(string path): since C# is a strongly typed language, your dictionary can only contain objects of type Action<string[]>, so you can't use methods with another signature than that. One solution is to add another dictionary in the form of Dictionary<string, Func<string[], string>>. As a result you'll get more and more dictionaries depending on your method signatures. From here on you should think about encapsulating your commands in, e.g., a CommandInterpreter class that could offer an API like this:
void Request(string cmdName, string[] cmdParameters);
string GetLastResult();
int GetLastCode();
Update:
Below code shows a possible object oriented solution (I've left out interfaces to make the code more compact):
using System;
using System.Collections.Generic;
using System.Linq;
namespace ConsoleApp1
{
/// <summary>
/// Immutable pairing of a registration name with the command payload (typically
/// a delegate) it triggers.
/// </summary>
public class Command<T>
{
    /// <summary>Name the command is registered under.</summary>
    public string Name { get; }

    /// <summary>The command payload executed when the name is invoked.</summary>
    public T TheCommand { get; }

    public Command(string name, T theCommand) => (Name, TheCommand) = (name, theCommand);
}
/// <summary>
/// Result of running a command. Ok() is a visitor-style branch: exactly one of
/// the two callbacks is invoked, depending on whether the result is a success.
/// </summary>
public interface ICommandResult
{
    /// <summary>Invokes <paramref name="yes"/> on success, <paramref name="no"/> on failure.</summary>
    void Ok(Action<ICommandResult> yes, Action<ICommandResult> no);
    /// <summary>Numeric result code (0 = success, -1 = unknown command).</summary>
    int Code { get; }
    /// <summary>Human-readable description of the outcome.</summary>
    string Description { get; }
}

/// <summary>Shared plumbing for command results: stores the code and description.</summary>
public abstract class CommandResult : ICommandResult
{
    public int Code { get; }
    public string Description { get; }

    protected CommandResult(int code, string description)
    {
        Code = code;
        Description = description;
    }

    // Subclasses decide which of the two callbacks fires.
    public abstract void Ok(Action<ICommandResult> yes, Action<ICommandResult> no);
}

/// <summary>Null-object result returned for unknown commands; always routes to "no".</summary>
public class NullCommandResult : CommandResult
{
    public NullCommandResult() : base(-1, "null")
    {
    }

    public override void Ok(Action<ICommandResult> yes, Action<ICommandResult> no) => no?.Invoke(this);
}

/// <summary>Successful result; always routes to "yes".</summary>
public class SuccessCommandResult : CommandResult
{
    public SuccessCommandResult(string description) : base(0, description)
    {
    }

    public override void Ok(Action<ICommandResult> yes, Action<ICommandResult> no) => yes?.Invoke(this);
}
/// <summary>
/// Registry of named commands; runs a command by name and returns its result.
/// </summary>
public class CommandInterpreter
{
    private Dictionary<string, Func<IEnumerable<string>, ICommandResult>> Commands = new Dictionary<string, Func<IEnumerable<string>, ICommandResult>>();

    /// <summary>Registers a command under its name (throws on duplicate names, as before).</summary>
    public void RegisterCommand(Command<Func<IEnumerable<string>, ICommandResult>> cmd)
        => Commands.Add(cmd.Name, cmd.TheCommand);

    /// <summary>
    /// Runs the command registered under <paramref name="name"/>; unknown names
    /// yield a <see cref="NullCommandResult"/> instead of throwing.
    /// </summary>
    public ICommandResult RunCommand(string name, IEnumerable<string> parameters)
    {
        // FIX: use an O(1) dictionary lookup instead of the previous
        // Where/Select/DefaultIfEmpty/Single chain, which scanned every entry
        // and defeated the point of using a Dictionary.
        Func<IEnumerable<string>, ICommandResult> command;
        if (Commands.TryGetValue(name, out command))
        {
            return command(parameters);
        }
        return new NullCommandResult();
    }
}
// Console driver: wires the three sample commands into a CommandInterpreter and
// runs a read-eval loop until the user types "exit".
class Program
{
    private CommandInterpreter _cmdInterpreter;

    private Program()
    {
        _cmdInterpreter = new CommandInterpreter();
        _cmdInterpreter.RegisterCommand(new Command<Func<IEnumerable<string>, ICommandResult>>("pwd", PrintWorkingDirectory));
        _cmdInterpreter.RegisterCommand(new Command<Func<IEnumerable<string>, ICommandResult>>("create", CreateFile));
        _cmdInterpreter.RegisterCommand(new Command<Func<IEnumerable<string>, ICommandResult>>("print", ReadFile));
    }

    // Stub handlers - each just reports success with a fixed message.
    private static CommandResult ReadFile(IEnumerable<string> arg) => new SuccessCommandResult("File read");
    private static CommandResult CreateFile(IEnumerable<string> arg) => new SuccessCommandResult("File xyz created");
    private static CommandResult PrintWorkingDirectory(IEnumerable<string> arg) => new SuccessCommandResult("Printed something");

    static void Main() => new Program().Run();

    private void Run()
    {
        Console.WriteLine("Welcome, type in command.");
        string input;
        do
        {
            Console.Write("> ");
            input = Console.ReadLine();
            // RunCommand never returns null - unknown commands yield a
            // NullCommandResult, whose Ok() routes to the failure callback below.
            var cmdResult = _cmdInterpreter.RunCommand(input, Enumerable.Empty<string>());
            cmdResult.Ok(
                r => Console.WriteLine($"Success: {cmdResult.Code}, {cmdResult.Description}"),
                r => Console.WriteLine($"FAILED: {cmdResult.Code}, {cmdResult.Description}"));
        } while (input != "exit");
    }
}
}
Output:
Welcome, type in command.
> pwd
Success: 0, Printed something
> create
Success: 0, File xyz created
> abc
FAILED: -1, null
>
You can just copy the code and play around with it.
Currently I've got this code:
// Work-in-progress analyzer helper: intended to report whether any field declared
// in fieldDeclarationSyntax is referenced inside the containing type's Dispose().
// NOTE(review): currently always returns false and contains no await (the
// compiler will warn CS1998); the commented lines mark the approaches being tried.
private async Task<bool> IsMentionedInDisposeCallAsync(SyntaxNodeAnalysisContext context, FieldDeclarationSyntax fieldDeclarationSyntax)
{
    // A FieldDeclarationSyntax can declare several variables ("int a, b;") -
    // resolve each declarator's symbol separately.
    foreach (var variableDeclaratorSyntax in fieldDeclarationSyntax.Declaration.Variables)
    {
        var declaredSymbol = context.SemanticModel.GetDeclaredSymbol(variableDeclaratorSyntax);
        if (declaredSymbol is IFieldSymbol fieldSymbol)
        {
            // SymbolFinder.FindReferencesAsync()
            var b = fieldSymbol.Locations;
            // context.SemanticModel.Compilation.
        }
    }
    return false;
}
And this scenario:
// Roslyn analyzer test input: SampleConsumer implements IDisposable and its
// Dispose() references both disposable fields.
// FIX: restored the verbatim-string prefix '@', which was mangled to '#' in
// transcription ('#"' is not valid C#). String content is unchanged.
private static readonly string TestSourceImplementsDisposableAndDoesMentionDisposableField = @"
using System;
using System.IO;
namespace ConsoleApplication1
{
public class SampleDisposable : IDisposable
{
public void Dispose()
{
}
}
public class SampleConsumer : IDisposable
{
private SampleDisposable _disposable = new SampleDisposable();
private IDisposable _ms = new MemoryStream();
public void Dispose()
{
_disposable?.Dispose();
_ms?.Dispose();
}
}
}";
Ultimately my desire is to figure out whether a dispose method is accessing a disposable field. Unfortunately I can't seem to find a way to get this working without using SymbolFinder, which requires a solution.
I did something similar with SymbolFinder and it was an easy thing to do - but how do I do it from the functionality available within a diagnostic?
Am I missing something obvious here?
You could simply use the SemanticModel to analyse the type used for the field like this:
// Answer version: for each field declared in the syntax node, resolve its symbol
// and test (via CheckIsTypeIDisposeable) whether the field's type implements
// IDisposable - using only the SemanticModel, no SymbolFinder/solution required.
// NOTE(review): still always returns false and has no await (CS1998); the
// isDisposeable flag is computed but not yet acted upon.
private async Task<bool> IsMentionedInDisposeCallAsync(SyntaxNodeAnalysisContext context, FieldDeclarationSyntax fieldDeclarationSyntax)
{
    foreach (var variableDeclaratorSyntax in fieldDeclarationSyntax.Declaration.Variables)
    {
        var declaredSymbol = context.SemanticModel.GetDeclaredSymbol(variableDeclaratorSyntax);
        if (declaredSymbol is IFieldSymbol fieldSymbol)
        {
            // NOTE(review): the "as INamedTypeSymbol" cast yields null for array,
            // pointer or type-parameter field types - confirm those cannot occur here.
            var isDisposeable = CheckIsTypeIDisposeable(fieldSymbol.Type as INamedTypeSymbol);
            // SymbolFinder.FindReferencesAsync()
            var b = fieldSymbol.Locations;
            // context.SemanticModel.Compilation.
        }
    }
    return false;
}
// Assembly-qualified name of IDisposable, precomputed once for the comparison below.
private string fullQualifiedAssemblyNameOfIDisposeable = typeof(IDisposable).AssemblyQualifiedName;

/// <summary>
/// Recursively checks whether <paramref name="type"/> is, derives from, or
/// implements System.IDisposable.
/// </summary>
private bool CheckIsTypeIDisposeable(INamedTypeSymbol type)
{
    // Identify the IDisposable class. You can use any method to do this here
    // A type.ToDisplayString() == "System.IDisposable" might do it for you
    if(fullQualifiedAssemblyNameOfIDisposeable ==
        type.ToDisplayString() + ", " + type.ContainingAssembly.ToDisplayString())
    {
        return true;
    }
    // Walk the base-class chain...
    if(type.BaseType != null)
    {
        if (CheckIsTypeIDisposeable(type.BaseType))
        {
            return true;
        }
    }
    // ...and every implemented interface (AllInterfaces already flattens the
    // whole hierarchy).
    // FIX: the loop variable was mangled to "#interface" in transcription; C#
    // needs the verbatim-identifier prefix '@' to use the keyword as a name.
    foreach(var @interface in type.AllInterfaces)
    {
        if (CheckIsTypeIDisposeable(@interface))
        {
            return true;
        }
    }
    return false;
}
Basically you would search through all interfaces of the class and the base class recursively to find the type corresponding to IDisposable - which should be somewhere in the hierarchy.
This is a basically a class library project which is somehow exposed as a WCF service. The code below is a part of the Data Access Layer. 'db' is an object of a DataContext class. To save a file, we do the following-
// Saves the file through a write-context callback and returns the id the stored
// procedure assigned (Guid.Empty when no id came back).
public static Guid SaveFile(FileDetails fileDetails)
{
    System.Nullable<Guid> id = null;
    // The lambda below is the Action<SoftCashCreditDBDataContext> that UsingWrite
    // invokes with the shared *write* data context.
    SystemDataContext.UsingWrite(db =>
    {
        // SaveFileData is the LINQ-to-SQL wrapper around the stored procedure;
        // the generated id comes back through the ref parameter.
        db.SaveFileData(fileDetails.RunId, fileDetails.FileData, fileDetails.FileExtension, ref id);
    });
    return id ?? Guid.Empty;
}
Then, the below would execute-
// Creates (or nests) a SystemDataContext scope and hands the shared *write*
// LINQ-to-SQL context to the supplied callback. Exceptions are routed to the
// central policy handler instead of propagating to the caller.
public static void UsingWrite(Action<SoftCashCreditDBDataContext> action)
{
    using (var context = new SystemDataContext())
    {
        try
        {
            action(context.Write);
        }
        catch (Exception ex)
        {
            // NOTE(review): if HandleExcetion swallows the exception, UsingWrite
            // returns normally and the caller never learns the write failed.
            DataAccessExceptionHandler.HandleExcetion(ex, Config.DataLayerPolicy);
        }
    }
}
// Scope constructor: the first instance created on this thread becomes the
// "root" and creates the real read/write LINQ-to-SQL contexts; nested instances
// reuse the parent's contexts and merely record their nesting depth.
public SystemDataContext()
{
    if (_stack == null)
    {
        _stack = new Stack<SystemDataContext>();
        this.Depth = 1;
        this.Read = new SoftCashCreditDBDataContext(Config.ReadDatabaseConnection);
        this.Write = new SoftCashCreditDBDataContext(Config.WriteDatabaseConnection);
    }
    else
    {
        var parent = _stack.Peek();
        /// Increment level of node.
        this.Depth = parent.Depth + 1;
        /// Copy data context from the parent
        this.Read = parent.Read;
        this.Write = parent.Write;
    }
    _stack.Push(this);
}

// 1-based nesting level of this scope on the current thread.
public int Depth { get; private set; }

// True for the outermost scope - the one that owns (and must dispose) the contexts.
public bool IsRoot { get { return this.Depth == 1; } }

// Per-thread stack of live scopes; [ThreadStatic] gives each thread its own stack.
[ThreadStatic]
private static Stack<SystemDataContext> _stack = null;

// Shared data context for reads (created and owned by the root scope).
public SoftCashCreditDBDataContext Read { get; private set; }

// Shared data context for writes (created and owned by the root scope).
public SoftCashCreditDBDataContext Write { get; private set; }
#region IDisposable Members
// Pops this scope off the thread's stack; the root scope additionally disposes
// both data contexts and clears the stack.
// NOTE(review): this assumes balanced, stack-ordered disposal on the same
// thread - disposing scopes out of order would pop the wrong instance.
public void Dispose()
{
    var context = _stack.Pop();
    if (context.IsRoot == true)
    {
        context.Read.Dispose();
        context.Write.Dispose();
        _stack = null;
    }
}
#endregion
}
They have implemented LINQ to SQL here, and created a DBContext class. The 'SaveFileData()' method is actually part of that class, where it just calls an SP inside to save the file.
What I did not follow-
What exactly does the call to UsingWrite() do here? What is passed to the 'Action action' parameter, and what is it doing?
I understand your confusion. They use 2 delegates.
This is passed to the action parameter:
db =>
{
db.SaveFileData(fileDetails.RunId, fileDetails.FileData, fileDetails.FileExtension, ref id);
}
So when UsingWrite is called, the SoftCashCreditDBDataContext delegate which was set in the Write delegate will call SaveFileData.
A simplified example to help you understand Action:
// Minimal demonstration of Action<T>: Test receives a delegate and invokes it.
public void Main()
{
    // The lambda x => Debug.Write(x) *is* the Action<string> passed into Test.
    Test(x => Debug.Write(x));
}

private void Test(Action<string> testAction)
{
    // Invokes whatever delegate the caller supplied, with "Bla" as its argument.
    testAction("Bla");
}
This function will call Debug.Write with the argument x, which is a string that is passed to the test action function.
I'm trying to learn patterns and I've got a job that is screaming for a pattern, I just know it but I can't figure it out. I know the filter type is something that can be abstracted and possibly bridged. I'M NOT LOOKING FOR A CODE REWRITE JUST SUGGESTIONS. I'm not looking for someone to do my job. I would like to know how patterns could be applied to this example.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Data;
using System.IO;
using System.Xml;
using System.Text.RegularExpressions;
namespace CopyTool
{
class CopyJob
{
// Selects how source files are matched: a DirectoryInfo wildcard pattern, a
// regular expression, or no filtering at all.
public enum FilterType
{ TextFilter, RegExFilter, NoFilter }

// Filter kind of the currently loaded job.
public FilterType JobFilterType { get; set; }

private string _jobName;
// Display name of the currently loaded job.
public string JobName { get { return _jobName; } set { _jobName = value; } }

private int currentIndex;
// Zero-based index of the currently loaded job row.
public int CurrentIndex { get { return currentIndex; } }

// Backing store: the whole job configuration, read from / written to XML.
private DataSet ds;

// Total number of configured jobs.
public int MaxJobs { get { return ds.Tables["Job"].Rows.Count; } }

private string _filter;
// Filter text (wildcard or regex pattern, depending on JobFilterType).
public string Filter { get { return _filter; } set { _filter = value; } }

private string _fromFolder;
// Source folder; the setter rejects paths that do not exist on disk.
public string FromFolder
{
    get { return _fromFolder; }
    set
    {
        if (Directory.Exists(value))
        { _fromFolder = value; }
        else
        { throw new DirectoryNotFoundException(String.Format("Folder not found: {0}", value)); }
    }
}

private List<string> _toFolders;
// Destination folders of the currently loaded job.
public List<string> ToFolders { get { return _toFolders; } }
public CopyJob()
{
    Initialize();
}

// Loads the XML configuration into the DataSet and selects the first job.
private void Initialize()
{
    if (ds == null)
    { ds = new DataSet(); }
    ds.ReadXml(Properties.Settings.Default.ConfigLocation);
    LoadValues(0);
}

// Runs only the currently loaded job, using the (possibly edited) property values.
public void Execute()
{
    ExecuteJob(FromFolder, _toFolders, Filter, JobFilterType);
}
/// <summary>
/// Runs every job defined in the configuration DataSet, independently of which
/// job is currently loaded into the editing properties.
/// </summary>
public void ExecuteAll()
{
    foreach (DataRow jobRow in ds.Tables["Job"].Rows)
    {
        string sourceFolder = jobRow["FromFolder"].ToString();
        string filterText = jobRow["FilterText"].ToString();

        // Translate the persisted string back into the enum; anything
        // unrecognized falls back to NoFilter.
        FilterType filterKind;
        string storedType = jobRow["FilterType"].ToString();
        if (storedType == "TextFilter")
        { filterKind = FilterType.TextFilter; }
        else if (storedType == "RegExFilter")
        { filterKind = FilterType.RegExFilter; }
        else
        { filterKind = FilterType.NoFilter; }

        // Collect this job's destination folders from its child rows.
        var destinations = new List<string>();
        foreach (DataRow folderRow in jobRow.GetChildRows("Job_ToFolder"))
        {
            destinations.Add(folderRow["FolderPath"].ToString());
        }

        ExecuteJob(sourceFolder, destinations, filterText, filterKind);
    }
}
/// <summary>
/// Collects the source files that pass the job's filter and copies them into
/// every destination folder.
/// </summary>
private void ExecuteJob(string OrigPath, List<string> DestPaths, string FilterText, FilterType FilterWay)
{
    FileInfo[] selectedFiles;
    if (FilterWay == FilterType.RegExFilter)
    {
        selectedFiles = GetFilesByRegEx(new Regex(FilterText), OrigPath);
    }
    else if (FilterWay == FilterType.TextFilter)
    {
        selectedFiles = GetFilesByFilter(FilterText, OrigPath);
    }
    else
    {
        // NoFilter: take every file in the source folder.
        selectedFiles = new DirectoryInfo(OrigPath).GetFiles();
    }

    foreach (var destination in DestPaths)
    {
        CopyFiles(selectedFiles, destination);
    }
}
// Saves the current job, then loads the job with the given 1-based record
// number (LoadValues clamps out-of-range values).
public void MoveToJob(int RecordNumber)
{
    Save();
    LoadValues(RecordNumber - 1);
}

// Adds a destination folder after verifying it exists on disk.
public void AddToFolder(string folderPath)
{
    if (Directory.Exists(folderPath))
    { _toFolders.Add(folderPath); }
    else
    { throw new DirectoryNotFoundException(String.Format("Folder not found: {0}", folderPath)); }
}

// Removes the destination folder at the given list position.
public void DeleteToFolder(int index)
{
    _toFolders.RemoveAt(index);
}
// Writes the edited property values back into the current "Job" row and rebuilds
// its "ToFolder" child rows. Changes stay in the in-memory DataSet until Commit().
public void Save()
{
    DataRow rw = ds.Tables["Job"].Rows[currentIndex];
    rw["JobName"] = _jobName;
    rw["FromFolder"] = _fromFolder;
    rw["FilterText"] = _filter;
    // Persist the enum as its string name.
    switch (JobFilterType)
    {
        case FilterType.RegExFilter:
            rw["FilterType"] = "RegExFilter";
            break;
        case FilterType.TextFilter:
            rw["FilterType"] = "TextFilter";
            break;
        default:
            rw["FilterType"] = "NoFilter";
            break;
    }
    // Replace all existing child rows with the current _toFolders list.
    DataRow[] ToFolderRows = ds.Tables["Job"].Rows[currentIndex].GetChildRows("Job_ToFolder");
    for (int i = 0; i <= ToFolderRows.GetUpperBound(0); i++)
    {
        ToFolderRows[i].Delete();
    }
    foreach (string fld in _toFolders)
    {
        DataRow ToFolderRow = ds.Tables["ToFolder"].NewRow();
        ToFolderRow["JobId"] = ds.Tables["Job"].Rows[currentIndex]["JobId"];
        ToFolderRow["Job_Id"] = ds.Tables["Job"].Rows[currentIndex]["Job_Id"];
        ToFolderRow["FolderPath"] = fld;
        ds.Tables["ToFolder"].Rows.Add(ToFolderRow);
    }
}
/// <summary>
/// Removes the current job row and reloads the row that now occupies this index
/// (LoadValues clamps the index when the last row was deleted).
/// </summary>
public void Delete()
{
    ds.Tables["Job"].Rows.RemoveAt(currentIndex);
    // FIX: was LoadValues(currentIndex++) - the post-increment passed the old
    // value and its side effect was immediately overwritten by the assignment
    // inside LoadValues, so it was dead (and misleading) code.
    LoadValues(currentIndex);
}
// Saves the current job and loads the next one (LoadValues clamps at the end).
public void MoveNext()
{
    Save();
    currentIndex++;
    LoadValues(currentIndex);
}

// Saves the current job and loads the previous one (LoadValues clamps at zero).
public void MovePrevious()
{
    Save();
    currentIndex--;
    LoadValues(currentIndex);
}

// Saves the current job and loads the first one.
public void MoveFirst()
{
    Save();
    LoadValues(0);
}

// Saves the current job and loads the last one.
public void MoveLast()
{
    Save();
    LoadValues(ds.Tables["Job"].Rows.Count - 1);
}

// Saves the current job, appends a new row with the next free JobId, and loads
// it for editing.
public void CreateNew()
{
    Save();
    int MaxJobId = 0;
    // NOTE(review): TryParse's result is ignored - MaxJobId stays 0 when the
    // table is empty or Compute yields something non-numeric.
    Int32.TryParse(ds.Tables["Job"].Compute("Max(JobId)", "").ToString(), out MaxJobId);
    DataRow rw = ds.Tables["Job"].NewRow();
    rw["JobId"] = MaxJobId + 1;
    ds.Tables["Job"].Rows.Add(rw);
    LoadValues(ds.Tables["Job"].Rows.IndexOf(rw));
}

// Saves the current job and writes the whole DataSet back to the XML config file.
public void Commit()
{
    Save();
    ds.WriteXml(Properties.Settings.Default.ConfigLocation);
}
// Copies the job row at the given index into the editing properties.
// The index is clamped into [0, row count - 1] and stored in currentIndex.
private void LoadValues(int index)
{
    if (index > ds.Tables["Job"].Rows.Count - 1)
    { currentIndex = ds.Tables["Job"].Rows.Count - 1; }
    else if (index < 0)
    { currentIndex = 0; }
    else
    { currentIndex = index; }
    DataRow rw = ds.Tables["Job"].Rows[currentIndex];
    _jobName = rw["JobName"].ToString();
    // Assigns the backing field directly, bypassing FromFolder's existence check.
    _fromFolder = rw["FromFolder"].ToString();
    _filter = rw["FilterText"].ToString();
    // Map the persisted string back to the enum; unknown values become NoFilter.
    switch (rw["FilterType"].ToString())
    {
        case "TextFilter":
            JobFilterType = FilterType.TextFilter;
            break;
        case "RegExFilter":
            JobFilterType = FilterType.RegExFilter;
            break;
        default:
            JobFilterType = FilterType.NoFilter;
            break;
    }
    if (_toFolders == null)
        _toFolders = new List<string>();
    _toFolders.Clear();
    // Re-add each destination via AddToFolder so its existence check runs.
    foreach (DataRow crw in rw.GetChildRows("Job_ToFolder"))
    {
        AddToFolder(crw["FolderPath"].ToString());
    }
}
/// <summary>
/// Returns the files in <paramref name="locPath"/> whose names match the regex.
/// </summary>
/// <param name="rgx">Pattern tested against each file name (not the full path).</param>
/// <param name="locPath">Folder to enumerate (non-recursive).</param>
private static FileInfo[] GetFilesByRegEx(Regex rgx, string locPath)
{
    // Idiom: filter with LINQ instead of the manual accumulation loop.
    return new DirectoryInfo(locPath).GetFiles()
        .Where(fi => rgx.IsMatch(fi.Name))
        .ToArray();
}
/// <summary>
/// Returns the files in <paramref name="locPath"/> matching a wildcard pattern
/// (e.g. "*.txt"), using DirectoryInfo's own pattern matching.
/// </summary>
private static FileInfo[] GetFilesByFilter(string filter, string locPath)
{
    return new DirectoryInfo(locPath).GetFiles(filter);
}
/// <summary>
/// Copies each file into <paramref name="destPath"/>. When the name already
/// exists there, tries "name (1).ext", "name (2).ext", ... After 100 attempts
/// the file is silently skipped (pre-existing behavior, kept intact).
/// </summary>
private void CopyFiles(FileInfo[] files, string destPath)
{
    foreach (FileInfo fi in files)
    {
        string nameWithoutExt = Path.GetFileNameWithoutExtension(fi.FullName);
        string extension = fi.Extension;
        string candidateName = fi.Name;

        // FIX: the previous version threw and caught CopyFileExistsException
        // purely as control flow (and left its catch variable unused); a plain
        // existence check expresses the same retry loop directly. Attempt
        // sequence is unchanged: original name, then "(1)" .. "(99)".
        for (int attempt = 1; attempt <= 100; attempt++)
        {
            string target = Path.Combine(destPath, candidateName);
            if (!File.Exists(target))
            {
                // Any IOException from the copy itself still propagates,
                // exactly as before.
                File.Copy(fi.FullName, target);
                break;
            }
            candidateName = String.Format("{0} ({1}){2}", nameWithoutExt, attempt, extension);
        }
    }
}
}
/// <summary>
/// Signals that a file with the target name already exists in the destination.
/// </summary>
public class CopyFileExistsException : Exception
{
    // FIX: the previous version declared "public string Message;", a field that
    // shadowed Exception.Message (compiler warning CS0108) and was never
    // assigned, so readers always saw null. Route the message through the base
    // class instead; the parameterless constructor is kept for existing callers.
    public CopyFileExistsException() { }

    public CopyFileExistsException(string message) : base(message) { }
}
}
This code is also "screaming" to be broken down into smaller more specialized objects.
Your CopyJob object seems to be more of a manager of a list of jobs. I would maybe change the name of this to CopyJobManager or something. You could then have CopyJob be the base class for the different filter types. The common code, Execute() for example, would be defined in the base class, and the custom behavior, Filtering for example, would be handled in the derived classes. You would have a TextFilterCopyJob, a RegExFilterCopyJob, and a NoFilterCopyJob.
Where the Factory pattern could come into play is when you're building a list of CopyJobs. You could have a CopyJobFactory object that takes in a row from your dataset and returns the proper child version of CopyJob. CopyJobManager would then do its operations on a list of CopyJobs instead of a list of dataset rows.
Whenever I see switches or bricks of ifs, I jump to the conclusion that at least a strategy pattern could be created.
A clean and easy way to set one up is to use a Dictionary<>.
In your case you're going to want a key based on the filter name your cases relate to, and the value will be a new object of the corresponding filter.
Now you can merely give the string to the dictionary's TryGetValue method and have it retrieve the correct filter object for you - boom!
Now you can encapsulate the mapping of the filters <--> strings, and keep the logic that uses the filters from having to see the logic of retrieving the correct object!
There's nothing wrong with using a switch statement like you have. It's not screaming for any design pattern other than that you can put it in a function so that you don't have the same switch twice.
The switch will be faster than using reflection, and the problem you're trying to solve doesn't really require the Factory pattern.
Here's some of what I did to implement a Factory pattern
First, I created an interface for the filter:
/// <summary>
/// Strategy interface: one implementation per way of selecting source files.
/// </summary>
interface IFileFilter
{
    /// <summary>Machine name of the filter (the value persisted in the config).</summary>
    string GetFilterName();
    /// <summary>Human-readable name for display purposes.</summary>
    string GetFilterReadableName();
    /// <summary>Returns the files under <paramref name="path"/> selected by <paramref name="filter"/>.</summary>
    FileInfo[] GetFilteredFiles(string path, string filter);
}
then I created sub-filter classes for this interface:
/// <summary>
/// File filter that selects files whose names match a regular expression.
/// </summary>
class RegExFileFilter : IFileFilter
{
    #region IFileFilter Members

    /// <summary>Machine name (matches the persisted FilterType string).</summary>
    public string GetFilterName()
    {
        return "RegExFilter";
    }

    /// <summary>Human-readable name for UI display.</summary>
    public string GetFilterReadableName()
    {
        return "RegEx Filter";
    }

    /// <summary>
    /// Returns the files in <paramref name="path"/> whose names match the regex
    /// pattern in <paramref name="filter"/>.
    /// </summary>
    public FileInfo[] GetFilteredFiles(string path, string filter)
    {
        // Idiom: LINQ filter instead of the manual accumulation loop.
        Regex rgx = new Regex(filter);
        return new DirectoryInfo(path).GetFiles()
            .Where(fi => rgx.IsMatch(fi.Name))
            .ToArray();
    }

    #endregion
}
/// <summary>
/// File filter that relies on the file system's wildcard matching (e.g. "*.txt").
/// </summary>
class TextFileFilter : IFileFilter
{
    #region IFileFilter Members

    /// <summary>Machine name (matches the persisted FilterType string).</summary>
    public string GetFilterName()
    {
        return "TextFilter";
    }

    /// <summary>Human-readable name for UI display.</summary>
    public string GetFilterReadableName()
    {
        return "Text Filter";
    }

    /// <summary>Delegates wildcard matching straight to DirectoryInfo.GetFiles.</summary>
    public FileInfo[] GetFilteredFiles(string path, string filter)
    {
        var directory = new DirectoryInfo(path);
        return directory.GetFiles(filter);
    }

    #endregion
}
/// <summary>
/// Pass-through filter that returns every file in the folder.
/// </summary>
class NoFileFilter : IFileFilter
{
    #region IFileFilter Members

    public string GetFilterName()
    {
        // FIX: this class was a copy-paste of TextFileFilter and wrongly
        // reported "TextFilter".
        return "NoFilter";
    }

    public string GetFilterReadableName()
    {
        // FIX: was "Text Filter" (same copy-paste error).
        return "No Filter";
    }

    public FileInfo[] GetFilteredFiles(string path, string filter)
    {
        // FIX: a "no filter" must ignore the filter argument; the copied code
        // passed it to GetFiles() and therefore still filtered.
        return new DirectoryInfo(path).GetFiles();
    }

    #endregion
}
Then I created a Factory:
/// <summary>
/// Factory: maps a filter's display name to a fresh IFileFilter instance.
/// NOTE(review): the keys here are the *readable* names ("Text Filter",
/// "RegEx Filter"), which do not match the GetFilterName() values the
/// implementations return - confirm which spelling callers actually pass in.
/// </summary>
public static IFileFilter FileFilter(string filterName)
{
    switch (filterName)
    {
        case "Text Filter":
            return new TextFileFilter();
        case "RegEx Filter":
            return new RegExFileFilter();
        default:
            // Unknown names fall back to the pass-through filter.
            return new NoFileFilter();
    }
}
I would suggest the following:
Refactor the switch statements (as #Jordan mentioned)
Add an extension method to convert the FilterType enum into an int and save that to the database rather than a string. E.g.
// Extension helpers for the FilterType enum.
public static class FilterTypeExtensions
{
    // Returns the enum's underlying numeric value, e.g. for database persistence.
    public static int AsNumeric(this FilterType filterType)
    {
        return (int)filterType;
    }
}
As a minor point, the single line braces are horrible, either drop the braces or use proper spacing/indentation. :)