I'm rewriting this entire question because I've realized the cause, but still need a solution:
I have a recurring job in Hangfire that runs every minute, checks the database, possibly updates some records, then exits.
I inject my DbContext into the class containing the job method. I register this DbContext for injection with the following:
builder.RegisterType<ApplicationDbContext>().As<ApplicationDbContext>().InstancePerLifetimeScope();
However, it seems that Hangfire does not create a separate lifetime scope every time the job runs, because the constructor only gets called once, although the job method gets called every minute.
This causes issues for me. If the user updates some values in the database (a DbContext gets injected somewhere else and is used to update values), the context still being used by Hangfire starts returning outdated values that have already been changed.
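For context, here is a minimal sketch of the setup described above (the class and method names are only illustrative, not my real code):
public class DatabaseCheckJob
{
    private readonly ApplicationDbContext _context;

    // Hangfire resolves this class through the configured JobActivator; with the
    // current behaviour the same instance (and therefore the same DbContext) is
    // reused across runs, which is exactly the problem.
    public DatabaseCheckJob(ApplicationDbContext context)
    {
        _context = context;
    }

    public void Run()
    {
        // check the database, possibly update some rows, then exit
    }
}

// Registered as a recurring job that runs every minute:
// RecurringJob.AddOrUpdate<DatabaseCheckJob>(job => job.Run(), Cron.Minutely());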
Hangfire currently uses a shared instance of JobActivator for every worker, which uses the following method for resolving a dependency:
public override object ActivateJob(Type jobType)
It is planned to add a JobActivationContext to this method for Milestone 2.0.0.
For now, there is no way to tell for which job a dependency gets resolved. The only way I can think of to work around this issue is to use the fact that jobs run serially on different threads (I don't know Autofac, so I use Unity as an example).
You could create a JobActivator that can store separate scopes per thread:
public class UnityJobActivator : JobActivator
{
[ThreadStatic]
private static IUnityContainer childContainer;
public UnityJobActivator(IUnityContainer container)
{
// Register dependencies
container.RegisterType<MyService>(new HierarchicalLifetimeManager());
Container = container;
}
public IUnityContainer Container { get; set; }
public override object ActivateJob(Type jobType)
{
return childContainer.Resolve(jobType);
}
public void CreateChildContainer()
{
childContainer = Container.CreateChildContainer();
}
public void DisposeChildContainer()
{
childContainer.Dispose();
childContainer = null;
}
}
Use a JobFilter with an IServerFilter implementation to set this scope for every job (thread):
public class ChildContainerPerJobFilterAttribute : JobFilterAttribute, IServerFilter
{
public ChildContainerPerJobFilterAttribute(UnityJobActivator unityJobActivator)
{
UnityJobActivator = unityJobActivator;
}
public UnityJobActivator UnityJobActivator { get; set; }
public void OnPerformed(PerformedContext filterContext)
{
UnityJobActivator.DisposeChildContainer();
}
public void OnPerforming(PerformingContext filterContext)
{
UnityJobActivator.CreateChildContainer();
}
}
And finally set up your DI:
UnityJobActivator unityJobActivator = new UnityJobActivator(new UnityContainer());
JobActivator.Current = unityJobActivator;
GlobalJobFilters.Filters.Add(new ChildContainerPerJobFilterAttribute(unityJobActivator));
We have created a new pull request in Hangfire.Autofac with the workaround described by Dresel. Hopefully it gets merged into the main branch:
https://github.com/HangfireIO/Hangfire.Autofac/pull/4
Edit: With Autofac, .NET 4.5 and Hangfire >= 1.5.0, use the Hangfire.Autofac NuGet package (GitHub).
Working with .NET 4.0 (Autofac 3.5.2 and Hangfire 1.1.1), we set up Dresel's solution with Autofac. The only difference is in the JobActivator:
using System;
using Autofac;
using Hangfire;
namespace MyApp.DependencyInjection
{
public class ContainerJobActivator : JobActivator
{
[ThreadStatic]
private static ILifetimeScope _jobScope;
private readonly IContainer _container;
public ContainerJobActivator(IContainer container)
{
_container = container;
}
public void BeginJobScope()
{
_jobScope = _container.BeginLifetimeScope();
}
public void DisposeJobScope()
{
_jobScope.Dispose();
_jobScope = null;
}
public override object ActivateJob(Type type)
{
return _jobScope.Resolve(type);
}
}
}
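The accompanying server filter is the same idea as the Unity version shown earlier; roughly like this (the attribute name is ours, pick whatever fits your project):
public class JobScopePerJobFilterAttribute : JobFilterAttribute, IServerFilter
{
    private readonly ContainerJobActivator _activator;

    public JobScopePerJobFilterAttribute(ContainerJobActivator activator)
    {
        _activator = activator;
    }

    public void OnPerforming(PerformingContext filterContext)
    {
        // open a fresh lifetime scope before the job instance is resolved
        _activator.BeginJobScope();
    }

    public void OnPerformed(PerformedContext filterContext)
    {
        // dispose the scope (and everything resolved from it) once the job has run
        _activator.DisposeJobScope();
    }
}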
To work around this problem, I've created a disposable JobContext class that holds an ILifetimeScope which is disposed when Hangfire completes the job. The real job is invoked by reflection.
public class JobContext<T> : IDisposable
{
public ILifetimeScope Scope { get; set; }
public void Execute(string methodName, params object[] args)
{
var instance = Scope.Resolve<T>();
var methodInfo = typeof(T).GetMethod(methodName);
ConvertParameters(methodInfo, args);
methodInfo.Invoke(instance, args);
}
private void ConvertParameters(MethodInfo targetMethod, object[] args)
{
var methodParams = targetMethod.GetParameters();
for (int i = 0; i < methodParams.Length && i < args.Length; i++)
{
if (args[i] == null) continue;
if (!methodParams[i].ParameterType.IsInstanceOfType(args[i]))
{
// try convert
args[i] = args[i].ConvertType(methodParams[i].ParameterType);
}
}
}
void IDisposable.Dispose()
{
if (Scope != null)
Scope.Dispose();
Scope = null;
}
}
There is a JobActivator that inspects the requested type and creates the lifetime scope if necessary.
public class ContainerJobActivator : JobActivator
{
private readonly IContainer _container;
private static readonly string JobContextGenericTypeName = typeof(JobContext<>).ToString();
public ContainerJobActivator(IContainer container)
{
_container = container;
}
public override object ActivateJob(Type type)
{
if (type.IsGenericType && type.GetGenericTypeDefinition().ToString() == JobContextGenericTypeName)
{
var scope = _container.BeginLifetimeScope();
var context = Activator.CreateInstance(type);
var propertyInfo = type.GetProperty("Scope");
propertyInfo.SetValue(context, scope);
return context;
}
return _container.Resolve(type);
}
}
To assist with creating jobs without using string parameters, there is another class with some extensions.
public static class JobHelper
{
public static object ConvertType(this object value, Type destinationType)
{
var sourceType = value.GetType();
TypeConverter converter = TypeDescriptor.GetConverter(sourceType);
if (converter.CanConvertTo(destinationType))
{
return converter.ConvertTo(value, destinationType);
}
converter = TypeDescriptor.GetConverter(destinationType);
if (converter.CanConvertFrom(sourceType))
{
return converter.ConvertFrom(value);
}
throw new Exception(string.Format("Can't convert value '{0}' of type {1} to destination type {2}", value, sourceType.Name, destinationType.Name));
}
public static Job CreateJob<T>(Expression<Action<T>> expression, params object[] args)
{
MethodCallExpression outermostExpression = expression.Body as MethodCallExpression;
var methodName = outermostExpression.Method.Name;
return Job.FromExpression<JobContext<T>>(ctx => ctx.Execute(methodName, args));
}
}
So to queue up a job, e.g. with the following signature:
public class ResidentUploadService
{
public void Load(string fileName)
{
//...
}
}
The code to create the job looks like this:
var localFileName = "Somefile.txt";
var job = JobHelper
.CreateJob<ResidentUploadService>(service => service.Load(localFileName), localFileName);
var state = new EnqueuedState("queuename");
var client = new BackgroundJobClient();
client.Create(job,state);
A solution is supported out of the box since Hangfire.Autofac 2.2.0.
In your situation, where your dependency is registered per lifetime scope, you should be able to use non-tagged scopes when setting up Hangfire.Autofac. From the linked documentation:
GlobalConfiguration.Configuration.UseAutofacActivator(builder.Build(), false);
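Put together with the registration from the question, the whole setup could look roughly like this (assuming the Hangfire.Autofac package is installed):
var builder = new ContainerBuilder();

builder.RegisterType<ApplicationDbContext>()
       .As<ApplicationDbContext>()
       .InstancePerLifetimeScope();
// (job classes may also need to be registered so Autofac can resolve them)

// false = use non-tagged lifetime scopes, so every background job gets its own
// scope and therefore its own DbContext instance
GlobalConfiguration.Configuration.UseAutofacActivator(builder.Build(), false);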
Related
Let's say I want to use, for example, a new DbContext object whenever a method is called in a class, but without receiving it as a parameter. Like so:
class MyClass {
public virtual void MethodOne() {
// Having automatically a new instance of DbContext
}
public virtual void MethodTwo() {
// Also having automatically a new instance of DbContext
}
}
What I was really hoping for was a DI way of doing this, like public void Method(IMyWayOfContext context):
class MyClass {
public virtual void MethodOne(IMyWayOfContext context) {
}
public virtual void MethodTwo(IMyWayOfContext context) {
}
}
Other classes inheriting from this class must be provided with a new instance of DbContext. That's why I don't want to create a new instance inside the method.
You could do something like this (generic interface, plus a wrapper with multiple constraints):
class DBContext{ }
interface IDoesMethods<TContext> where TContext : new()
{
void MethodOne(TContext context = default(TContext));
void MethodTwo(TContext context = default(TContext));
}
class MyClass : IDoesMethods<DBContext>
{
public void MethodOne(DBContext context)
{
}
public void MethodTwo(DBContext context)
{
}
}
class MyContextWrapper<TClass, TContext> : IDoesMethods<TContext> where TContext : new() where TClass : IDoesMethods<TContext>, new()
{
public void MethodOne(TContext context = default(TContext))
{
instance.MethodOne(new TContext());
}
public void MethodTwo(TContext context = default(TContext))
{
instance.MethodTwo(new TContext());
}
private TClass instance = new TClass();
}
class Program
{
static void Main(string[] args)
{
var wrapper = new MyContextWrapper<MyClass, DBContext>();
wrapper.MethodOne();
wrapper.MethodTwo();
}
}
Make a property with only a getter that returns a new instance every time:
protected DbContext MyDBContext
{
get
{
return new DbContext();
}
}
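Since every read of the property hands back a fresh context, the caller is responsible for disposing each one, for example:
public void MethodOne()
{
    using (var context = MyDBContext) // new instance created by the getter
    {
        // work with the context
    }
}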
EDIT: If you want some kind of dependency injection, you can make your class generic and tell the class instance which type of context you want:
class MyClass<T> where T : DbContext {
protected DbContext MyDBContext
{
get
{
return Activator.CreateInstance<T>();
}
}
public void MethodOne() {
// Having automatically a new instance of DbContext
}
public void MethodTwo() {
// Also having automatically a new instance of DbContext
}
}
Your simple solution can work this way:
class MyClass {
protected DbContext InternalContext {
get { return new DbContext(); }
}
public virtual void MethodOne(DbContext dc = null) {
if(dc == null)
dc = InternalContext;
// do your work
}
public virtual void MethodTwo(DbContext dc = null) {
if(dc == null)
dc = InternalContext;
// do your work
}
}
In that case, you have to take care of disposing the context returned by InternalContext yourself.
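As a rough sketch, one way to do that is to dispose only when the method created the context itself:
public virtual void MethodOne(DbContext dc = null)
{
    var ownsContext = dc == null;  // did we create it, or was it passed in?
    dc = dc ?? InternalContext;
    try
    {
        // do your work
    }
    finally
    {
        if (ownsContext)
            dc.Dispose();          // only dispose contexts we created ourselves
    }
}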
While the answers here look valid, they don't seem to fully satisfy your requirement of having a solution that relies on DI.
DI in its simplest expression is most of the time achieved with constructor injection.
Your design was already good and DI-ready.
Indeed, asking for dependencies via the constructor is good.
It is at the composition root of your application that you decide which implementation to pass.
Using a DI library can help (but it is not required to enable DI).
With your current class design:
class MyClass {
public virtual void MethodOne(IMyWayOfContextFactory contextFactory) {
using(var context = contextFactory.Create()){
//play with context
}
}
public virtual void MethodTwo(IMyWayOfContextFactory contextFactory) {
using(var context = contextFactory.Create()){
//play with context
}
}
}
public class ContextFactory : IMyWayOfContextFactory {
public IMyWayOfContext Create(){
return new MyWayOfContext();
}
}
Without a factory and with a DI container like SimpleInjector, you could have:
class MyClass {
public virtual void MethodOne(IMyWayOfContext context) {
//play with context
}
public virtual void MethodTwo(IMyWayOfContext context) {
//play with context
}
}
And register your component once at the composition root with configurable Lifestyle management:
container.Register<IMyWayOfContext, MyWayOfContext>(Lifestyle.Transient);
The latter approach is simpler if you want to configure when to inject which instance of your context. Indeed, such configuration is built into a DI container library. For instance, see: Lifestyle of component with SimpleInjector.
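For illustration, swapping the lifestyle at the composition root changes when a new context is created without touching MyClass (Lifestyle.Scoped assumes a newer SimpleInjector version with a default scoped lifestyle configured):
container.Register<IMyWayOfContext, MyWayOfContext>(Lifestyle.Transient);    // new instance per resolve
// container.Register<IMyWayOfContext, MyWayOfContext>(Lifestyle.Singleton); // one shared instance
// container.Register<IMyWayOfContext, MyWayOfContext>(Lifestyle.Scoped);    // one instance per scope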
I'm very new to using Unity.
I'm trying to test a segment of code in LINQPad. This code uses a DBContext which relies on Log4Net as a service. I'm trying to write the sample code to use the actual DBContext, but can't get it to construct.
The code I'm trying is below. If more information is needed, please ask for it before down-voting; I'm not sure how much you need to see to understand my issue, since I'm still learning Unity.
void Main()
{
RegisterObjects();
var logger = IoCHelper.Resolve<ILogger>();
//var _logger = new Clark.Logging.MultiLogger();
var _logger = logger;
var _ediContext = new EdiContext();
var transactionId = 1008;
var limit = 0;
var temp = new Type210SubscriberProvider(_ediContext)
.GetAfterNew(transactionId, limit);
temp.Dump();
var temp2 = new Type210SubscriberProvider(_ediContext)
.GetAfter(transactionId, limit);
temp2.Dump();
}
public void RegisterObjects()
{
XmlConfigurator.Configure();
var multiLogger = new MultiLogger();
multiLogger.Register(new Log4NetLogger());
IoCHelper.RegisterInstance<ILogger>(multiLogger);
}
If I try just this:
void Main()
{
var _ediContext = new EdiContext();
}
The error message I am receiving in LINQPad is:
Resolution of the dependency failed, type = "Clark.Logging.ILogger", name = "(none)".
Exception occurred while: while resolving.
Exception is: InvalidOperationException - The current type, Clark.Logging.ILogger, is an interface and cannot be constructed. Are you missing a type mapping?
At the time of the exception, the container was:
Resolving Clark.Logging.ILogger,(none)
UPDATE:
Here is some more detail from the Global.asax.cs file. I'm not sure how to translate this to LINQPad.
protected void Application_Start()
{
// GlobalConfiguration.Configuration is an HttpConfiguration object.
ConfigureContainer(GlobalConfiguration.Configuration);
ConfigureServices(GlobalConfiguration.Configuration);
}
private static void ConfigureServices(HttpConfiguration configuration)
{
configuration.Services.Add(typeof (IExceptionLogger), new UnhandledExceptionLogger(GetLogger()));
configuration.Services.Replace(typeof (IExceptionHandler), new UnhandledExceptionHandler());
}
private static void ConfigureContainer(HttpConfiguration config)
{
config.DependencyResolver = new IoCContainer(IoCHelper.Container);
new LoggingDependencyInitializer().RegisterObjects();
IoCHelper.RegisterType<IEdiContext>(new InjectionFactory(unityContainer => new EdiContext()));
IoCHelper.RegisterType<SubscriberController>();
IoCHelper.RegisterType<ConsumerInformationController>();
IoCHelper.RegisterType<TransactionTypeController>();
}
private static ILogger GetLogger()
{
return IoCHelper.Resolve<ILogger>();
}
Here is the IoCHelper class:
public static class IoCHelper
{
private static UnityContainer _container;
public static UnityContainer Container
{
get { return _container ?? (_container = new UnityContainer()); }
}
public static T Resolve<T>()
{
return Container.Resolve<T>();
}
public static void RegisterType<TFrom, TTo>() where TTo : TFrom
{
Container.RegisterType<TFrom, TTo>();
}
public static void RegisterInstance(Type type, object instance)
{
Container.RegisterInstance(type, instance);
}
public static void RegisterType<T>()
{
Container.RegisterType<T>();
}
public static void RegisterInstance<T>(T instance)
{
Container.RegisterInstance(instance);
}
public static void RegisterType<T>(InjectionFactory injectionFactory)
{
Container.RegisterType<T>(injectionFactory);
}
}
When using an IoC container, you need to register your types (e.g. map interfaces / abstract classes to their concrete types), since when you ask the container to resolve an interface, it needs to find its mapping before providing the instance.
By the way, if Unity doesn't find a mapping, it will try to construct the type itself. In your case that fails, since you cannot construct an interface.
With Unity, you can do it either by configuration using:
var section = ConfigurationManager.GetSection(SectionName) as UnityConfigurationSection;
if (section != null)
{
section.Configure(container);
}
Or directly:
container.RegisterType<InterfaceType, ConcreteType>();
Since I see this line in your example:
IoCHelper.RegisterInstance<ILogger>(multiLogger);
you are registering ILogger with a specific instance, so there must be a problem with your IoCHelper implementation. Please add more code and I'll edit my answer with a specific solution.
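For what it's worth, here is a rough sketch of how the Global.asax registrations above might be mirrored in the LINQPad script, assuming the same assemblies are referenced (just the registrations, not a guaranteed fix):
void Main()
{
    RegisterObjects();

    // resolve through the container rather than calling new EdiContext() directly,
    // so Unity can supply registered dependencies such as ILogger
    var ediContext = IoCHelper.Resolve<IEdiContext>();
    // ... rest of the test code
}

public void RegisterObjects()
{
    XmlConfigurator.Configure();

    var multiLogger = new MultiLogger();
    multiLogger.Register(new Log4NetLogger());
    IoCHelper.RegisterInstance<ILogger>(multiLogger);

    // mirrored from ConfigureContainer in Global.asax.cs
    IoCHelper.RegisterType<IEdiContext>(new InjectionFactory(c => new EdiContext()));
}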
Introduction
Class SessionModel is a service locator providing several services (I am going to elaborate my system architecture in the future, but for now I need to do it that way).
Code
I edited the following code part to be a Short, Self Contained, Correct (Compilable), Example (SSCCE):
using System;
using System.ComponentModel.Composition;
using System.ComponentModel.Composition.Hosting;
namespace ConsoleApplication1
{
internal class Program
{
private static void Main(string[] args)
{
var sessionModel = new SessionModel(3);
// first case (see text down below):
var compositionContainer = new CompositionContainer();
// second case (see text down below):
//var typeCatalog = new TypeCatalog(typeof (SessionModel));
//var compositionContainer = new CompositionContainer(typeCatalog);
compositionContainer.ComposeExportedValue(sessionModel);
var someService = compositionContainer.GetExportedValue<ISomeService>();
someService.DoSomething();
}
}
public class SessionModel
{
private int AValue { get; set; }
[Export]
public ISomeService SomeService { get; private set; }
public SessionModel(int aValue)
{
AValue = aValue;
// of course, there is much more to do here in reality:
SomeService = new SomeService();
}
}
public interface ISomeService
{
void DoSomething();
}
public class SomeService : ISomeService
{
public void DoSomething()
{
Console.WriteLine("DoSomething called");
}
}
}
Problem
I would like MEF to consider the parts (i.e. SomeService) exported by the service locator when composing other parts, but unfortunately this does not work.
First Case
When I try to get the exported value for ISomeService there is a System.ComponentModel.Composition.ImportCardinalityMismatchException telling me there are no exports with this contract name and required type identity (ConsoleApplication1.ISomeService).
Second Case
If I create the CompositionContainer using the TypeCatalog, the exception is slightly different. It is a System.ComponentModel.Composition.CompositionException telling me MEF can't find a way to create a ConsoleApplication1.SessionModel (which is correct, and the reason why I am creating it myself).
Additional Information
mefx says for both cases:
[Part] ConsoleApplication1.SessionModel from: DirectoryCatalog (Path=".")
[Export] ConsoleApplication1.SessionModel.SomeService (ContractName="ConsoleApplication1.ISomeService")
[Part] ConsoleApplication1.SessionModel from: AssemblyCatalog (Assembly="ConsoleApplication1, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null")
[Export] ConsoleApplication1.SessionModel.SomeService (ContractName="ConsoleApplication1.ISomeService")
What do I have to do? Is this possible with MEF, or do I have to use Unity, StructureMap, or something else? Can this be done by implementing an ExportProvider?
OK, this is how I did it:
I implemented my own SessionModelExportProvider that finds exports in my SessionModel (see the code below). The SessionModelExport class just holds the export data and, instead of creating a service instance, returns the value of the corresponding SessionModel property.
public class SessionModelExportProvider : ExportProvider
{
private List<Export> Exports { get; set; }
public SessionModelExportProvider(SessionModel sessionModel)
{
// get all the properties of the session model having an Export attribute
var typeOfSessionModel = typeof (SessionModel);
PropertyInfo[] properties = typeOfSessionModel.GetProperties();
var propertiesHavingAnExportAttribute =
from p in properties
let exportAttributes = p.GetCustomAttributes(typeof (ExportAttribute), false)
where exportAttributes.Length > 0
select new
{
PropertyInfo = p,
ExportAttributes = exportAttributes
};
// creating Export objects for each export
var exports = new List<Export>();
foreach (var propertyHavingAnExportAttribute in propertiesHavingAnExportAttribute)
{
var propertyInfo = propertyHavingAnExportAttribute.PropertyInfo;
foreach (ExportAttribute exportAttribute in propertyHavingAnExportAttribute.ExportAttributes)
{
string contractName = exportAttribute.ContractName;
if (string.IsNullOrEmpty(contractName))
{
Type contractType = exportAttribute.ContractType ?? propertyInfo.PropertyType;
contractName = contractType.FullName;
}
var metadata = new Dictionary<string, object>
{
{CompositionConstants.ExportTypeIdentityMetadataName, contractName},
{CompositionConstants.PartCreationPolicyMetadataName, CreationPolicy.Shared}
};
var exportDefinition = new ExportDefinition(contractName, metadata);
var export = new SessionModelExport(sessionModel, propertyInfo, exportDefinition);
exports.Add(export);
}
}
Exports = exports;
}
protected override IEnumerable<Export> GetExportsCore(ImportDefinition definition,
AtomicComposition atomicComposition)
{
return Exports.Where(e => definition.IsConstraintSatisfiedBy(e.Definition));
}
}
public class SessionModelExport : Export
{
private readonly SessionModel sessionModel;
private readonly PropertyInfo propertyInfo;
private readonly ExportDefinition definition;
public SessionModelExport(SessionModel sessionModel, PropertyInfo propertyInfo, ExportDefinition definition)
{
this.sessionModel = sessionModel;
this.propertyInfo = propertyInfo;
this.definition = definition;
}
public override ExportDefinition Definition
{
get { return definition; }
}
protected override object GetExportedValueCore()
{
var value = propertyInfo.GetValue(sessionModel, null);
return value;
}
}
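To plug the provider into the container, I pass it alongside (or instead of) a catalog; roughly:
var sessionModel = new SessionModel(3);
var exportProvider = new SessionModelExportProvider(sessionModel);

// CompositionContainer accepts additional export providers; a catalog with the
// remaining parts could be passed in as the first argument as well
var compositionContainer = new CompositionContainer(exportProvider);

var someService = compositionContainer.GetExportedValue<ISomeService>();
someService.DoSomething();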
The problem is that SomeService is an instance property. You could have several SessionModel objects in your system, and MEF would have no way of knowing which SessionModel is returning the ISomeService instance that is supposed to be matched to an import.
Instead, just make SessionModel a static class and SomeService a static property. Alternatively, make SessionModel a singleton. The SomeService property would still be static, but would export the service from the one-and-only instance of SessionModel.
using System;
using System.ComponentModel.Composition;
using System.ComponentModel.Composition.Hosting;
using System.ComponentModel.Composition.ReflectionModel;
using System.Reflection;
using System.Linq;
namespace ConsoleApplication1
{
internal class Program
{
private static void Main(string[] args)
{
var catalogs = new AggregateCatalog();
var catalog = new System.ComponentModel.Composition.Hosting.AssemblyCatalog(Assembly.GetExecutingAssembly());
catalogs.Catalogs.Add(catalog);
var sessionModel = new SessionModel(3);
var container = new CompositionContainer(catalog);
ISomeService someService = container.GetExportedValueOrDefault<ISomeService>(sessionModel.cname);
if (someService != null)
{
someService.DoSomething();
}
}
}
public class SessionModel
{
private int AValue { get; set; }
//[Import("One",typeof(ISomeService))]
//public ISomeService SomeService { get; private set; }
public SessionModel(int aValue)
{
AValue = aValue;
// of course, there is much more to do here in reality:
}
public string cname { get { return "One"; } }
}
public class SessionModel1
{
private int AValue { get; set; }
//[Import("Two",typeof(ISomeService))]
//public ISomeService SomeService { get; private set; }
public SessionModel1(int aValue)
{
AValue = aValue;
}
public string cname { get { return "Two"; } }
}
public interface ISomeService
{
void DoSomething();
}
[Export("One",typeof(ISomeService))]
public class SomeService : ISomeService
{
public SomeService()
{
Console.WriteLine("Some Service Called");
}
public void DoSomething()
{
Console.WriteLine("DoSomething called");
Console.ReadKey();
}
}
[Export("Two",typeof(ISomeService))]
public class SomeService1 : ISomeService
{
public SomeService1()
{
Console.WriteLine("Some Service1 Called");
}
public void DoSomething()
{
Console.WriteLine("DoSomething called 1");
Console.ReadKey();
}
}
}
First case: By passing sessionModel to ComposeExportedValue you add a part of type SessionModel and not of ISomeService. To make this case work you need to pass the service to ComposeExportedValue.
compositionContainer.ComposeExportedValue(sessionModel.SomeService);
Second case: In this case you leave the creation of parts to the container. The container can create new parts only if there is either a parameterless constructor or a constructor with parameters that is decorated with the ImportingConstructorAttribute. This most probably means that you will need to change your design a bit.
Personally I would go with the first case, but try to keep this to a minimum. After all, the normal (and suggested) usage of MEF is to let the container create and handle parts.
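To make the second case concrete, here is a hedged sketch of what such a design change could look like: export the service type itself so the container can construct it, instead of exporting it through a SessionModel property.
[Export(typeof(ISomeService))]
public class SomeService : ISomeService
{
    public void DoSomething()
    {
        Console.WriteLine("DoSomething called");
    }
}

// composition then needs no ComposeExportedValue call:
// var typeCatalog = new TypeCatalog(typeof(SomeService));
// var compositionContainer = new CompositionContainer(typeCatalog);
// var someService = compositionContainer.GetExportedValue<ISomeService>();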
We're using a library that uses pooled objects (ServiceStack.Redis's PooledRedisClientManager). Objects are created and reused for multiple web requests. However, Dispose should be called after each use to release the object back into the pool.
By default, Ninject only deactivates an object reference if it has not been deactivated before.
What happens is that the pool instantiates an object and marks it as active, and Ninject then runs the activation pipeline. At the end of the (web) request, Ninject runs the deactivation pipeline, which calls Dispose, and thus the pool marks the object as inactive. On the next request, the first pooled instance is reused and the pool marks it as active. However, at the end of that request, Ninject does not run its deactivation pipeline, because the ActivationCache has already marked this instance as deactivated (this check happens in the Pipeline).
Here's a simple sample that we've added in a new MVC project to demonstrate this problem:
public interface IFooFactory
{
IFooClient GetClient();
void DisposeClient(FooClient client);
}
public class PooledFooClientFactory : IFooFactory
{
private readonly List<FooClient> pool = new List<FooClient>();
public IFooClient GetClient()
{
lock (pool)
{
var client = pool.SingleOrDefault(c => !c.Active);
if (client == null)
{
client = new FooClient(pool.Count + 1);
client.Factory = this;
pool.Add(client);
}
client.Active = true;
return client;
}
}
public void DisposeClient(FooClient client)
{
client.Active = false;
}
}
public interface IFooClient
{
void Use();
}
public class FooClient : IFooClient, IDisposable
{
internal IFooFactory Factory { get; set; }
internal bool Active { get; set; }
internal int Id { get; private set; }
public FooClient(int id)
{
this.Id = id;
}
public void Dispose()
{
if (Factory != null)
{
Factory.DisposeClient(this);
}
}
public void Use()
{
Console.WriteLine("Using...");
}
}
public class HomeController : Controller
{
private IFooClient foo;
public HomeController(IFooClient foo)
{
this.foo = foo;
}
public ActionResult Index()
{
foo.Use();
return View();
}
public ActionResult About()
{
return View();
}
}
// In the Ninject configuration (NinjectWebCommon.cs)
private static void RegisterServices(IKernel kernel)
{
kernel.Bind<IFooFactory>()
.To<PooledFooClientFactory>()
.InSingletonScope();
kernel.Bind<IFooClient>()
.ToMethod(ctx => ctx.Kernel.Get<IFooFactory>().GetClient())
.InRequestScope();
}
The solutions that we've come up with thus far are:
Mark these objects as InTransientScope() and use another deactivation mechanism (like an MVC ActionFilter to dispose of the object after each request). We'd lose the benefits of Ninject's deactivation process and require an indirect approach to disposing of the object.
Write a custom IActivationCache that checks the pool to see if the object is active. Here's what I've written so far, but I'd like someone else's eyes to see how robust it is:
public class PooledFooClientActivationCache : DisposableObject, IActivationCache, INinjectComponent, IDisposable, IPruneable
{
private readonly ActivationCache realCache;
public PooledFooClientActivationCache(ICachePruner cachePruner)
{
realCache = new ActivationCache(cachePruner);
}
public void AddActivatedInstance(object instance)
{
realCache.AddActivatedInstance(instance);
}
public void AddDeactivatedInstance(object instance)
{
realCache.AddDeactivatedInstance(instance);
}
public void Clear()
{
realCache.Clear();
}
public bool IsActivated(object instance)
{
lock (realCache)
{
var fooClient = instance as FooClient;
if (fooClient != null) return fooClient.Active;
return realCache.IsActivated(instance);
}
}
public bool IsDeactivated(object instance)
{
lock (realCache)
{
var fooClient = instance as FooClient;
if (fooClient != null) return !fooClient.Active;
return realCache.IsDeactivated(instance);
}
}
public Ninject.INinjectSettings Settings
{
get
{
return realCache.Settings;
}
set
{
realCache.Settings = value;
}
}
public void Prune()
{
realCache.Prune();
}
}
// Wire it up:
kernel.Components.RemoveAll<IActivationCache>();
kernel.Components.Add<IActivationCache, PooledFooClientActivationCache>();
Specifically for ServiceStack.Redis: use the PooledRedisClientManager.DisposablePooledClient<RedisClient> wrapper so we always get a new object instance, then let the client object be transient, since the wrapper takes care of disposing it. This approach does not tackle the broader concept of pooled objects with Ninject and only fixes it for ServiceStack.Redis.
var clientManager = new PooledRedisClientManager();
kernel.Bind<PooledRedisClientManager.DisposablePooledClient<RedisClient>>()
.ToMethod(ctx => clientManager.GetDisposableClient<RedisClient>())
.InRequestScope();
kernel.Bind<IRedisClient>()
.ToMethod(ctx => ctx.Kernel.Get<PooledRedisClientManager.DisposablePooledClient<RedisClient>>().Client)
.InTransientScope();
Is one of these approaches more appropriate than the others?
I have not used Redis so far, so I cannot tell you how to do it correctly. But I can give you some input in general:
Disposing is not the only thing done by the activation pipeline (e.g. it also performs property/method injection and executes activation/deactivation actions). Using a custom activation cache that returns false even though the instance has been activated before will cause these other actions to be executed again (e.g. property injection will be done again).
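To illustrate with the binding from the sample above: any activation/deactivation actions attached to it would now fire on every request that hands out a pooled instance, not only when the instance is first created. A sketch:
kernel.Bind<IFooClient>()
      .ToMethod(ctx => ctx.Kernel.Get<IFooFactory>().GetClient())
      .InRequestScope()
      .OnActivation(client => { /* runs again every time the pooled client is handed out */ })
      .OnDeactivation(client => { /* likewise runs at the end of every request */ });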
I'm wondering how to properly use abstract factories when using a DI framework and one of the parameters in that factory is a dependency that should be handled by the DI framework.
I am not sure whether to make my abstract factory omit the parameter completely then use my DI container to wire it up or whether I should pass the dependency to the object.
For example, I have a TcpServer and it uses a Session.Factory to create sessions. The Session object actually takes a Processor in its constructor. Should I pass the Processor to the TcpServer and have it pass it on to the Session.Factory, or have my DI container do the wiring?
If I were to have the DI container do the wiring it would look like this:
class Session : ISession
{
public delegate ISession Factory(string name);
...
public Session(string name, Processor processor)
{
...
}
}
class TcpServer : ITcpServer
{
private readonly Session.Factory _sessionFactory;
public TcpServer(Session.Factory sessionFactory)
{
this._sessionFactory = sessionFactory;
}
...
public void OnConnectionReceived()
{
...
var session= _sessionFactory(ip.LocalEndPoint());
...
}
}
Then using a DI container like Ninject I'd be able to do this when configuring the container:
Bind<Session.Factory>().ToMethod(c =>
{
var processor = Kernel.Get<Processor>();
return (name) => new Session(name, processor);
}).InSingletonScope();
My main issue with this approach is that it assumes whoever creates the Session.Factory knows about the processor. In my case, since I am using a DI container, this is actually very convenient but it seems weird to have a factory have its own dependencies. I always imagined a factory not really ever having any members.
If I were to pass the dependency through
class Session : ISession
{
public delegate ISession Factory(string name, Processor processor);
...
public Session(string name, Processor processor)
{
...
}
}
class TcpServer : ITcpServer
{
private readonly Session.Factory _sessionFactory;
private readonly Processor _processor;
public TcpServer(Session.Factory sessionFactory, Processor processor)
{
this._sessionFactory = sessionFactory;
this._processor = processor;
}
...
public void OnConnectionReceived()
{
...
var session = _sessionFactory(ip.LocalEndPoint(), _processor);
...
}
}
I have two issues with the second approach:
The TcpServer doesn't actually do anything with the Processor; it just passes it along. It almost seems like poor man's DI at work.
In the real program behind this code, the Processor actually has a reference to the TcpServer, so with this approach I get a circular reference. When I break it apart by using the first scenario, it's not an issue.
What do you think is the best approach? I am open to new ideas as well.
Thanks!
Many containers support factories in one way or another, and this is the way you should go.
E.g. taking your example, define an ISessionFactory interface like this:
public interface ISessionFactory
{
ISession CreateSession(string name);
}
For Ninject 2.3, see https://github.com/ninject/ninject.extensions.factory and let the interface be implemented by Ninject:
Bind<ISessionFactory>().AsFactory();
For 2.2, do the implementation yourself:
public class SessionFactory : ISessionFactory
{
private IKernel kernel;
public SessionFactory(IKernel kernel)
{
this.kernel = kernel;
}
public ISession CreateSession(string name)
{
return this.kernel.Get<ISession>(new ConstructorArgument("name", name));
}
}
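Either way, the TcpServer then depends only on the factory interface and never sees the Processor; a sketch of the consuming side (ISession and the endpoint lookup are as in your snippets):
class TcpServer : ITcpServer
{
    private readonly ISessionFactory _sessionFactory;

    public TcpServer(ISessionFactory sessionFactory)
    {
        _sessionFactory = sessionFactory;
    }

    public void OnConnectionReceived()
    {
        // the Session's Processor dependency is supplied by the container inside
        // the factory implementation; TcpServer only passes what it knows itself
        var session = _sessionFactory.CreateSession(ip.LocalEndPoint());
    }
}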
The pattern I use for an abstract factory pattern is a little different from yours. I use something like setter injection on a generic singleton, but wrap the configurable delegate "property" in a more intuitive interface.
I would prefer not to have to register each implementation individually, so I would rather use some convention that can be tested at application start-up. I'm not sure about the Ninject syntax for auto-registering custom conventions, but the logic comes down to scanning the relevant assemblies for reference types T that have a static readonly field of type AbstractFactory<T>, then calling Configure(Func<T>) on that static member using reflection.
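A rough sketch of that scan, written against the AbstractFactory<T> type shown below and with the container call left as a delegate (all names in this block are mine; it needs System.Linq and System.Reflection):
public static class AbstractFactoryConventions
{
    // find classes declaring a public static readonly AbstractFactory<T> field and
    // configure each factory to resolve T through the supplied delegate
    public static void ConfigureFactories(Assembly assembly, Func<Type, object> resolve)
    {
        foreach (var type in assembly.GetTypes())
        {
            if (!type.IsClass) continue;

            var factoryFieldType = typeof(AbstractFactory<>).MakeGenericType(type);
            var field = type.GetFields(BindingFlags.Public | BindingFlags.Static)
                            .FirstOrDefault(f => f.IsInitOnly && f.FieldType == factoryFieldType);
            if (field == null) continue;

            var factory = field.GetValue(null);
            var configure = factoryFieldType.GetMethod("Configure");

            // build a strongly typed Func<T> that defers to the container delegate
            var factoryMethod = typeof(AbstractFactoryConventions)
                .GetMethod("MakeFactoryMethod", BindingFlags.NonPublic | BindingFlags.Static)
                .MakeGenericMethod(type)
                .Invoke(null, new object[] { resolve });

            configure.Invoke(factory, new[] { factoryMethod });
        }
    }

    private static Func<T> MakeFactoryMethod<T>(Func<Type, object> resolve)
    {
        return () => (T)resolve(typeof(T));
    }
}

// e.g. with Ninject: AbstractFactoryConventions.ConfigureFactories(Assembly.GetExecutingAssembly(), t => kernel.Get(t));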
An example of the generic abstract factory singleton and how it would be declared on a Session is below.
public class Session {
public static readonly AbstractFactory<Session> Factory = AbstractFactory<Session>.GetInstance();
}
public sealed class AbstractFactory<T>
where T: class{
static AbstractFactory(){
Bolt = new object();
}
private static readonly object Bolt;
private static AbstractFactory<T> Instance;
public static AbstractFactory<T> GetInstance(){
if(Instance == null){
lock(Bolt){
if(Instance == null)
Instance = new AbstractFactory<T>();
}
}
return Instance;
}
private AbstractFactory(){}
private Func<T> m_FactoryMethod;
public void Configure(Func<T> factoryMethod){
m_FactoryMethod = factoryMethod;
}
public T Create() {
if(m_FactoryMethod == null) {
throw new NotImplementedException();
}
return m_FactoryMethod.Invoke();
}
}
Update
If you need to pass parameters into your factory method, you can alter the class as follows:
public sealed class AbstractFactory<TDataContract,T>
where T: class{
static AbstractFactory(){
Bolt = new object();
}
private static readonly object Bolt;
private static AbstractFactory<TDataContract,T> Instance;
public static AbstractFactory<TDataContract,T> GetInstance(){
if(Instance == null){
lock(Bolt){
if(Instance == null)
Instance = new AbstractFactory<TDataContract,T>();
}
}
return Instance;
}
private AbstractFactory(){}
private Func<TDataContract,T> m_FactoryMethod;
public void Configure(Func<TDataContract,T> factoryMethod){
m_FactoryMethod = factoryMethod;
}
public T Create(TDataContract data) {
if(m_FactoryMethod == null) {
throw new NotImplementedException();
}
return m_FactoryMethod.Invoke(data);
}
}
Your SessionData, Session and TcpServer might look like this:
public class SessionData{
public DateTime Start { get; set; }
public string IpAddress { get; set; }
}
public class Session {
public static readonly AbstractFactory<SessionData,Session> Factory = AbstractFactory<SessionData,Session>.GetInstance();
private readonly string _ip;
private readonly DateTime _start;
public Session(SessionData data) {
_ip = data.IpAddress;
_start = DateTime.Now;
}
public event EventHandler<RequestReceivedEventArgs> RequestAdded;
}
public class RequestReceivedEventArgs: EventArgs {
public SessionRequest Request { get; set; }
}
public class TcpServer : ITcpServer
{
private readonly Processor _processor;
public TcpServer(Processor processor)
{
this._processor = processor;
}
public void OnConnectionReceived()
{
var sessionData = new SessionData {
IpAddress = ip.LocalEndPoint(),
Start = DateTime.Now
};
var session = Session.Factory.Create(sessionData);
//...do other stuff
}
public void ServeResponse(SessionRequest request){
_processor.Process(request);
}
}
When configuring your DI container, you can set up the factory like this:
Session.Factory.Configure(sessionData => {
// instead of injecting the processor into the Session, configure events
// that allow the TcpServer to process the data.
// (After all, it is more logical for a server to serve a request than
// it is for a Session to do the processing. Sessions tend to store data
// and state, not invoke processes.)
var session = new Session(sessionData);
session.RequestAdded += (sender, e) => {
Kernel.Get<ITcpServer>().ServeResponse(e.Request);
};
return session;
});
});