# Newton-Raphson maximum-likelihood estimator for the shape parameter
# (alpha) of a Weibull distribution with the scale fixed at 1.
#
# x.dados : numeric vector of observations (assumed > 0 — TODO confirm
#           with caller; log(x.dados) requires positive data).
# alpha.0 : initial guess for alpha.
# precisao: convergence tolerance on successive iterates.
# n       : maximum number of iterations.
#
# Returns a list with `alpha.estimado` (the estimate) and `n.iter`
# (iterations used), or NULL with a warning if the method does not
# converge within `n` iterations.
newton.raphson.w <- function(x.dados, alpha.0 = 10, precisao = 1e-7, n = 100) {
  # Score function (first derivative of the log-likelihood):
  #   dl/dalpha = n/alpha + sum(log x) - sum(log(x) * x^alpha)
  dlogLikW <- function(y) {
    (length(x.dados) / y) + sum(log(x.dados)) -
      sum(log(x.dados) * (x.dados^y))
  }
  # Second derivative of the log-likelihood:
  #   d2l/dalpha2 = -n/alpha^2 - sum(log(x)^2 * x^alpha)
  ddlogLikW <- function(z) {
    -(length(x.dados) / z^2) - sum((log(x.dados)^2) * (x.dados^z))
  }
  for (i in seq_len(n)) {
    alpha.1 <- alpha.0 - dlogLikW(alpha.0) / ddlogLikW(alpha.0)
    if (abs(alpha.1 - alpha.0) < precisao) {
      res <- list(alpha.estimado = alpha.1, n.iter = i)
      return(res)
    }
    alpha.0 <- alpha.1
  }
  # The original print()ed the message, so callers invisibly received the
  # message string as the return value; warn and return NULL instead.
  warning("with the number of iterations there was no convergence.",
          call. = FALSE)
  NULL
}

dados %>%
  pull() %>%
  newton.raphson.w()
$alpha.estimado
[1] 4.965997
$n.iter
[1] 5
Code
# Simulation: behaviour of the estimator as the sample size grows.
# For each size in 10, 20, ..., 1000, draw a Weibull(shape = 5, scale = 1)
# sample and estimate alpha via Newton-Raphson (starting value 2).
n <- seq(10, 1000, 10)
# vapply (not sapply) guarantees a numeric(1) result per sample size.
estimativas <- vapply(n, function(size) {
  dat <- rweibull(size, shape = 5, scale = 1)
  newton_result <- newton.raphson.w(dat, 2)
  newton_result$alpha.estimado
}, numeric(1))
dados <- tibble(estim = estimativas, sim = n)
ggplot(dados, aes(x = sim, y = estim)) +
  geom_point(color = "blue") +
  # Reference line at the true shape parameter (alpha = 5).
  # geom_hline is the proper geom here; the original passed y = 5 as a
  # geom_line parameter to fake a horizontal line.
  geom_hline(yintercept = 5, linetype = "dashed") +
  labs(x = "Sample size", y = expression(paste("Estimate for ", alpha)))
Therefore, the estimated shape parameter for the data provided was \(\hat{\alpha} \approx 4.9660\). Moreover, the simulation shows that the larger the sample size, the better the algorithm performs: the estimates concentrate around the true value \(\alpha = 5\).