How to put Rmpfr values into a function in R?

I am calculating the inverse of a Vandermonde matrix. I have written code to compute the inverse explicitly from its closed-form formula, as below:
library(gtools)
#input is the generation vector of terms of Vandermonde matrix.
FMinv <- function(base){
  n = length(base)
  inv = matrix(nrow = n, ncol = n)
  for (i in 1:n){
    for (j in 1:n){
      if (j < n){
        a = as.matrix(combinations(n, n - j, repeats.allowed = F))
        arow.tmp = nrow(a) # this is in fact length(a[,1])
        b = which(a == i) %% length(a[,1])
        nrowdel = length(b)
        b = replace(b, b == 0, length(a[,1]))
        a = a[-b,]
        if (arow.tmp - nrowdel > 1){
          a = as.matrix(a)
          nrowa = nrow(a)
          prod = vector()
          for (k in 1:nrowa){
            prod[k] = prod(base[a[k,]])
          }
          num = sum(prod)
        }
        if (arow.tmp - nrowdel == 1){
          num = prod(base[a])
        }
        den = base[i]*prod(base[-i] - base[i])
        inv[i,j] = (-1)^(j-1)*num/den
      }
      if (j == n){
        inv[i,j] = 1/(base[i]*prod(base[i] - base[-i]))
      }
    }
  }
  return(inv)
}
And I define a base as follow:
> library(Rmpfr)
> a=mpfr(c(10:1),1000)/Rmpfr::mpfr(sum(1:10),1000)
> a
10 'mpfr' numbers of precision 1000 bits
[1] 0.18181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181819
[2] 0.16363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363637
[3] 0.14545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545456
[4] 0.12727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727274
[5] 0.10909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909091
[6] 0.090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909090909094
[7] 0.072727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727272727278
[8] 0.054545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545454545455
[9] 0.036363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363639
[10] 0.018181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181818181819
However, when I attempted to pass "a" into the function, I got:
> FMinv(a)
Error in sum(prod) : invalid 'type' (list) of argument
By checking its type,
> typeof(a)
[1] "list"
The only function I know of that transforms it to numeric values is asNumeric() in Rmpfr. However,
> asNumeric(a)
[1] 0.18181818 0.16363636 0.14545455 0.12727273 0.10909091 0.09090909 0.07272727 0.05454545 0.03636364 0.01818182
it loses the remaining digits.
Is there any way to pass "a" into my function without losing decimals?
Thanks!

The trick is to use S3 methods.
Define a generic, a default method to be called with your "normal" numbers (objects of class "numeric"), and a method for class "mpfr", which is what the question asks for.
That is the problem solved. It took a while, but I believe the code below is right.
library(gtools)
FMinv <- function(...) UseMethod("FMinv")

FMinv.default <- function(base) {
  # Your function
  # unchanged
}

FMinv.mpfr <- function(base, precBits = getPrec(base)) {
  n <- length(base)
  inv <- mpfr(rep(0, n*n), precBits = precBits)
  inv <- matrix(inv, nrow = n, ncol = n)
  for (i in 1:n) {
    for (j in 1:n) {
      if (j < n) {
        a <- combinations(n, n - j, repeats.allowed = F)
        a <- as.matrix(a)
        arow.tmp <- nrow(a) # this is in fact length(a[, 1])
        b <- which(a == i) %% length(a[, 1])
        nrowdel <- length(b)
        b <- replace(b, b == 0, length(a[, 1]))
        a <- a[-b, ]
        num <- mpfr(0, precBits[1])
        if (arow.tmp - nrowdel > 1) {
          a <- as.matrix(a)
          nrowa <- nrow(a)
          for (k in 1:nrowa) {
            num <- num + prod(base[a[k, ]])
          }
        }
        if (arow.tmp - nrowdel == 1) {
          num <- num + prod(base[a])
        }
        den <- base[i] * prod(base[-i] - base[i])
        inv[i, j] <- (-1)^(j - 1) * num/den
      }
      if (j == n) {
        inv[i, j] <- 1/(base[i] * prod(base[i] - base[-i]))
      }
    }
  }
  return(inv)
}
Now test both methods and compare some of the results' values.
library(Rmpfr)
a <- mpfr(c(10:1),1000)/Rmpfr::mpfr(sum(1:10),1000)
inv1 <- FMinv(asNumeric(a))
inv2 <- FMinv(a)
inv1[10, 10]
#[1] -6.98014e+11
inv2[10, 10]
#1 'mpfr' number of precision 1000 bits
#[1] -698013564040.84166942239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906525573192239858906474
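To quantify what the double-precision path loses, we can promote inv1 to mpfr and diff it against inv2 (a sketch; the exact digits depend on platform rounding):
asNumeric(mpfr(inv1[10, 10], 1000) - inv2[10, 10])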

Related

Create a function to find the length of a vector WITHOUT using length()

I already tried max(seq_along(x)), but I need it to also return 0 if we, let's say, input numeric(0).
So yeah, it works for anything other than numeric(0). This is what I have so far:
my_length <- function(x){
  max(seq_along(x))
}
You can just include a 0 in the max() call in your attempt:
my_length <- function(x) max(0, seq_along(x))
my_length(10:1)
[1] 10
my_length(NULL)
[1] 0
my_length(numeric())
[1] 0
Using a for loop:
my_length <- function(x){
  l = 0
  for(i in x) l <- l + 1
  return(l)
}
x <- numeric(0)
my_length(x)
# [1] 0
x <- 1:10
my_length(x)
# [1] 10
Another option:
my_length <- function(x) nrow(matrix(x))
You can use NROW():
len <- \(x) NROW(x)
Examples:
len(numeric(0))
#> [1] 0
len(letters)
#> [1] 26
len(c(3, 0, 9, 1))
#> [1] 4
From the documentation:
nrow and ncol return the number of rows or columns present in x. NCOL and NROW do the same treating a vector as 1-column matrix, even a 0-length vector ...
Here are a few more functional programming approaches:
Using mapping and summation:
length = function (x) {
  sum(vapply(x, \(.) 1L, integer(1L)))
}
Using reduction:
length = function (x) {
  Reduce(\(x, .) x + 1L, x, 0L)
}
Using recursion:
length = function (x, len = 0L) {
  if (is_empty(x)) len else Recall(x[-1L], len + 1L)
}
Alas, the last one needs a helper function, and that is unfortunately not trivial to define without using length():
is_empty = function (x) {
  is.null(x) || identical(x, vector(typeof(x), 0L))
}
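For completeness, a small test harness (my own addition) to compare any of the candidates above against base length() on the usual edge cases; some candidates (e.g. the nrow(matrix(x)) one) may legitimately fail on NULL:
check_length <- function(f) {
  cases <- list(numeric(0), NULL, 10:1, letters, list(1, 2, 3))
  all(sapply(cases, f) == sapply(cases, length))
}
check_length(my_length)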

Manipulating sub matrices in R

Nh <- matrix(c(17, 26, 30, 17, 23, 17, 24, 23), nrow = 2, ncol = 4); Nh
Sh <- matrix(c(8.290133, 6.241174, 6.096808, 7.4449672, 6.894924, 7.692115,
               4.540521, 7.409122), nrow = 2, ncol = 4); Sh
NhSh <- as.matrix(Nh*Sh); NhSh
rh <- c(0.70710678, 0.40824829, 0.28867513, 0.22360680, 0.18257419,
        0.15430335, 0.13363062, 0.11785113, 0.10540926, 0.09534626); rh
pv <- c()
for (j in 1:2) {
  for (i in 1:4) {
    pv <- rbind(pv, NhSh[j,i]*rh)
  }
}
pv
row.names(pv) <- rep(c(1:2), each = 4)
lst <- lapply(split(seq_len(nrow(pv)), as.numeric(row.names(pv))),
              function(i) pv[i,])
data <- 40
nlargest <- function(x, data) {
  res <- order(x)[seq_len(data)]
  pos <- arrayInd(res, dim(x), useNames = TRUE)
  list(values = pv[res], position = pos)
}
out <- lapply(lst, nlargest, data = 40)
Continuing the above code: is there any brief way of repeating the following steps for each out$'k'$position, for k in 1:2?
s1 <- c(1,1,1,1); ch <- c(5,7,10,5); C <- 150; a <- out$'1'$position
for (j in a[40:1, "row"]) {
  s1[j] <- s1[j] + 1
  cost1 <- sum(ch*s1)
  if (cost1 >= C) break
}
s1; cost1
# Output: [1] 5 6 6 5
#         [1] 152
I have to get 2 values of 's' and 'cost', one for each out$k$position. I tried
mat <- replicate(2, {x = matrix(data = rep(NA, 80), ncol = 2)}); mat
for (k in 1:2) {
  mat[,,k] <- out$'k'$position
}
mat
Error in mat[, , k] <- out$k$position : number of items to replace is not a multiple of replacement length
for (k in 1:2) {
  for (j in mat[,,k][40:1]) {
    s[j] <- s[j] + 1
    cost <- sum(ch*s)
    if (cost >= C) break
  }
}
s; cost
Error in s[j] <- s[j] + 1 : NAs are not allowed in subscripted assignments
Can anyone please help in resolving these errors?
We could apply the function directly by looping over the list. Note that each element of the list is a matrix
sapply(lst, is.matrix)
# 1 2
#TRUE TRUE
so, there is no need to unlist and create a matrix
out <- lapply(lst, nlargest, data = 40)
Checking with the OP's results:
out1 <- nlargest(sub1, 40)
identical(out[[1]], out1)
#[1] TRUE
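A side note on the root cause of the first error (my sketch, not part of the original answer): out$'k' looks up an element literally named "k", which does not exist and returns NULL, while out[[k]] indexes by position. The fill loop works once that is changed:
mat <- replicate(2, matrix(NA_real_, nrow = 40, ncol = 2))
for (k in 1:2) {
  mat[, , k] <- out[[k]]$position  # [[k]] indexes by position, not the literal name "k"
}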
Update 2
Based on the second update, we need to initialize 'cost' and 'sl' with one element per value of 'k'. Here, we initialize 'sl' as a list of vectors:
sl <- rep(list(c(1, 1, 1, 1)), 2)
C <- 150
cost <- numeric(2)
for (k in 1:2) {
  for (j in mat[,,k][40:1, 1]) {
    sl[[k]][j] <- sl[[k]][j] + 1
    cost[k] <- sum(ch*sl[[k]])
    if (cost[k] >= C) break
  }
}
sl
#[[1]]
#[1] 5 7 6 4
#[[2]]
#[1] 6 5 5 7
cost
#[1] 154 150

R: Calculating IV using Black-Scholes and bisection method, loop refusing to work

I have my Black-Scholes function and my bisection model for call options, with data from a CSV. The code appears to be getting stuck in the inner loop because the condition stays above the tolerance. My Black-Scholes function calculates accurately, and I am using the average of bid and ask for the market price instead of the actual price of the option. After working on this for hours, maybe I am just missing something obvious.
The link to the CSV is here: http://s000.tinyupload.com/?file_id=06213890949979926112
########################################################################
#Black-Scholes-Merton Call
bsmCall <- function(S, K, M, sig, r) {
  yrTime <- M/252
  d1 <- (log(S/K) + (r + (sig^2/2))*yrTime) / (sig*sqrt(yrTime))
  d2 <- d1 - sig*sqrt(yrTime)
  C <- (S*pnorm(d1)) - (pnorm(d2)*K*exp(-r*yrTime))
  return(C)
}
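As a quick sanity check of bsmCall, textbook inputs (hypothetical values: at the money, one year of 252 trading days, 20% vol, 5% rate) should reproduce the well-known Black-Scholes price of about 10.45:
bsmCall(S = 100, K = 100, M = 252, sig = 0.20, r = 0.05)
# roughly 10.45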
########################################################################
myData = read.csv("09-26-16.csv", stringsAsFactors=FALSE) #DATA
myData <- myData[,2:24] #omit first column
####### start bisection method of CALLS and put IV in database #######
i <- 1          # reset counter
tol <- 0.000001 # tolerance
while (i <= nrow(myData)) {
  if ((myData[i,5] != 0) & (myData[i,6] != 0)) {
    volLower <- .0001                    # will need to reset with each iteration
    volUpper <- 1                        # will need to reset with each iteration
    volMid <- (volLower + volUpper) / 2  # will need to reset with each iteration
    while (abs(bsmCall(as.numeric(as.character(myData[i,17])),
                       as.numeric(as.character(myData[i,1])),
                       as.numeric(as.character(myData[i,22])),
                       volMid,
                       as.numeric(as.character(myData[i,23]))) -
               as.numeric(as.character(myData[i,5]))) >= tol) {
      if ((bsmCall(as.numeric(as.character(myData[i,17])),
                   as.numeric(as.character(myData[i,1])),
                   as.numeric(as.character(myData[i,22])),
                   volMid,
                   as.numeric(as.character(myData[i,23]))) -
           as.numeric(as.character(myData[i,5]))) < 0) {
        volLower <- volMid
        volMid <- (volUpper + volMid)/2
      } else {
        volUpper <- volMid
        volMid <- (volLower + volMid)/2
      }
    }
    myData[i,8] <- volMid
  } else {
    myData[i,8] <- 0
  }
  i <- i + 1
}
The problem is here:
while(abs(bsmCall(as.numeric(as.character(myData[i,17])),
as.numeric(as.character(myData[i,1])),
as.numeric(as.character(myData[i,22])),
volMid,
as.numeric(as.character(myData[i,23])))-(as.numeric(as.character(myData[i,5])))) >= tol)
You're using a while loop on a condition that, once true, stays true. It's an infinite loop, and it is encountered on your very first row of data.
How to fix this error is specific to your use case, but if you just change while to if you'll see the loop complete immediately.
You asked about the bisection method. There are a few implementations in packages, and here's another:
bisect <- function(fn, lower, upper, tol = 1.e-07, ...) {
  f.lo <- fn(lower, ...)
  f.hi <- fn(upper, ...)
  feval <- 2
  if (f.lo * f.hi > 0) stop("Root is not bracketed in the specified interval\n")
  chg <- upper - lower
  while (abs(chg) > tol) {
    x.new <- (lower + upper) / 2
    f.new <- fn(x.new, ...)
    if (abs(f.new) <= tol) break
    if (f.lo * f.new < 0) upper <- x.new
    if (f.hi * f.new < 0) lower <- x.new
    chg <- upper - lower
    feval <- feval + 1
  }
  list(x = x.new, value = f.new, fevals = feval)
}
# An example
fn1 <- function(x, a) {
  exp(-x) - a*x
}
bisect(fn1, 0, 2, a = 1)
bisect(fn1, 0, 2, a = 2)
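To connect it back to the question, here is a hedged sketch of wiring bisect() up to the implied-vol problem for one row i (the column positions follow the OP's code and are assumptions here; the columns may still need the as.numeric(as.character(.)) conversion):
## root of f(sig) = model price - market price
ivObjective <- function(sig, S, K, M, r, mkt) bsmCall(S, K, M, sig, r) - mkt
## bisect() stops if the root is not bracketed, i.e. if no vol in
## [0.0001, 1] reproduces the market price, hence the try()
iv <- try(bisect(ivObjective, 0.0001, 1,
                 S = myData[i, 17], K = myData[i, 1], M = myData[i, 22],
                 r = myData[i, 23], mkt = myData[i, 5])$x, silent = TRUE)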
Recursive version:
bisectMatt <- function(fn, lo, hi, tol = 1e-7, ...) {
  flo <- fn(lo, ...)
  fhi <- fn(hi, ...)
  if (flo * fhi > 0)
    stop("root is not bracketed by lo and hi")
  mid <- (lo + hi) / 2
  fmid <- fn(mid, ...)
  if (abs(fmid) <= tol || abs(hi - lo) <= tol)
    return(mid)
  if (fmid * fhi > 0)
    return(bisectMatt(fn, lo, mid, tol, ...))
  return(bisectMatt(fn, mid, hi, tol, ...))
}
Jeez, this is my 3rd edit so far...
Let's reconstruct the while loop when i = 1 and print volMid, the only part of the while condition that updates after each iteration:
i <- 1
volLower <- .0001                    # will need to reset with each iteration
volUpper <- 1                        # will need to reset with each iteration
volMid <- (volLower + volUpper) / 2  # will need to reset with each iteration
j <- 1
while (abs(bsmCall(myData[i,17], myData[i,1], myData[i,22], volMid, myData[i,23]) - myData[i,5]) >= tol & j < 30) {
  if (bsmCall(myData[i,17], myData[i,1], myData[i,22], volMid, myData[i,23]) - myData[i,5] < 0) {
    volLower <- volMid
    volMid <- (volUpper + volMid)/2
  } else {
    print("pos")
    volUpper <- volMid
    volMid <- (volLower + volMid)/2
  }
  j <- j + 1
  print(volMid)
}
Result:
#[1] 0.750025
#[1] 0.8750125
#[1] 0.9375062
#[1] 0.9687531
#[1] 0.9843766
#[1] 0.9921883
#[1] 0.9960941
#[1] 0.9980471
#[1] 0.9990235
#[1] 0.9995118
#[1] 0.9997559
#[1] 0.9998779
#[1] 0.999939
#[1] 0.9999695
#[1] 0.9999847
#[1] 0.9999924
#[1] 0.9999962
#[1] 0.9999981
#[1] 0.999999
#[1] 0.9999995
#[1] 0.9999998
#[1] 0.9999999
#[1] 0.9999999
#[1] 1
#[1] 1
#[1] 1
#[1] 1
#[1] 1
#[1] 1
volMid converges to 1 in fewer than 30 iterations, and from there on out it's stuck: the model price stays below the market price even at the upper volatility bound of 1, so the difference never drops below the tolerance.

Gram Schmidt with R

Here is MATLAB code for performing Gram-Schmidt, on page 1 of
http://web.mit.edu/18.06/www/Essays/gramschmidtmat.pdf
I have been trying for hours and hours to reproduce this in R, since I don't have MATLAB.
Here is my R code:
f=function(x){
  m = nrow(x)
  n = ncol(x)
  Q = matrix(0, m, n)
  R = matrix(0, n, n)
  for (j in 1:n){
    v = x[,j,drop=FALSE]
    for (i in 1:j-1){
      R[i,j] = t(Q[,i,drop=FALSE]) %*% x[,j,drop=FALSE]
      v = v - R[i,j] %*% Q[,i,drop=FALSE]
    }
    R[j,j] = max(svd(v)$d)
    Q[,j,drop=FALSE] = v/R[j,j]
  }
  return(list(Q,R))
}
It keeps saying there are errors in either:
v=v-R[i,j]%*%Q[,i,drop=FALSE]
or
R[j,j]=max(svd(v)$d);
What am I doing wrong in translating this MATLAB code to R?
Just for fun, I added an Armadillo version of this code and benchmarked it.
Armadillo code :
#include <RcppArmadillo.h>
// [[Rcpp::depends(RcppArmadillo)]]
using namespace Rcpp;

//[[Rcpp::export]]
List grahm_schimdtCpp(arma::mat A) {
  int n = A.n_cols;
  int m = A.n_rows;
  arma::mat Q(m, n);
  Q.fill(0);
  arma::mat R(n, n);
  R.fill(0);
  for (int j = 0; j < n; j++) {
    arma::vec v = A.col(j);
    if (j > 0) {
      for (int i = 0; i < j; i++) {
        R(i, j) = arma::as_scalar(Q.col(i).t() * A.col(j));
        v = v - R(i, j) * Q.col(i);
      }
    }
    R(j, j) = arma::norm(v, 2);
    Q.col(j) = v / R(j, j);
  }
  return List::create(_["Q"] = Q,
                      _["R"] = R);
}
R code not optimized (directly based on algorithm)
grahm_schimdtR <- function(A) {
  m <- nrow(A)
  n <- ncol(A)
  Q <- matrix(0, nrow = m, ncol = n)
  R <- matrix(0, nrow = n, ncol = n)
  for (j in 1:n) {
    v <- A[ , j, drop = FALSE]
    if (j > 1) {
      for (i in 1:(j-1)) {
        R[i, j] <- t(Q[ , i, drop = FALSE]) %*% A[ , j, drop = FALSE]
        v <- v - R[i, j] * Q[ , i]
      }
    }
    R[j, j] <- norm(v, type = "2")
    Q[ , j] <- v / R[j, j]
  }
  list("Q" = Q, "R" = R)
}
Native QR decomposition in R
qrNative <- function(A) {
  qrdec <- qr(A)
  list(Q = qr.Q(qrdec), R = qr.R(qrdec))
}
We will test it with the same matrix as in the original document (link in the post above):
A <- matrix(c(4, 3, -2, 1), ncol = 2)
all.equal(grahm_schimdtR(A)$Q %*% grahm_schimdtR(A)$R, A)
## [1] TRUE
all.equal(grahm_schimdtCpp(A)$Q %*% grahm_schimdtCpp(A)$R, A)
## [1] TRUE
all.equal(qrNative(A)$Q %*% qrNative(A)$R, A)
## [1] TRUE
Now let's benchmark it
require(rbenchmark)
set.seed(123)
A <- matrix(rnorm(10000), 100, 100)
benchmark(qrNative(A),
grahm_schimdtR(A),
grahm_schimdtCpp(A),
order = "elapsed")
##                  test replications elapsed relative user.self sys.self user.child sys.child
## 3 grahm_schimdtCpp(A)          100   0.272    1.000     0.272    0.000          0         0
## 1         qrNative(A)          100   1.013    3.724     1.144    0.872          0         0
## 2   grahm_schimdtR(A)          100  84.279  309.849    95.042   72.577          0         0
I really love how easy it is to port code into Rcpp...
If you are translating Matlab code into R, the code semantics (code logic) should remain the same. For example, in your code you transpose Q in t(Q[,i,drop=FALSE]), as per the given Matlab code. But Q[,i] on its own does not return the column as a column vector. So, we can make it a column vector by using the statement:
matrix(Q[,i],n,1); # n is the number of rows.
There is no error in R[j,j]=max(svd(v)$d) if v is a vector (row or column).
Yes, there is an error in
v=v-R[i,j]%*%Q[,i,drop=FALSE]
because you are using a matrix multiplication. Instead you should use a normal multiplication:
v=v-R[i,j] * Q[,i,drop=FALSE]
Here R[i,j] is a number, whereas Q[,i,drop=FALSE] is a vector. So, dimension mismatch arises here.
One more thing: if j is 3, then 1:j-1 returns [0, 1, 2], because 1:j is evaluated first and then 1 is subtracted. So it should be changed to 1:(j-1), which returns [1, 2] for the same j. But there is a catch: if j is 1, then 1:(j-1) is 1:0, which returns [1, 0], and the 0th index is undefined for a vector or a matrix. So, we can bypass the 0 value by putting in a conditional expression.
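A one-line demonstration of the precedence trap, plus the seq_len() idiom that sidesteps the 1:0 case entirely:
j <- 3
1:j - 1        # [1] 0 1 2  (parsed as (1:j) - 1)
1:(j - 1)      # [1] 1 2
seq_len(j - 1) # [1] 1 2; seq_len(0) is integer(0), so a loop over it runs zero times when j == 1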
Here is a working code for Gram Schmidt algorithm:
A = matrix(c(4,3,-2,1),2,2)
m = nrow(A)
n = ncol(A)
Q = matrix(0,m,n)
R = matrix(0,n,n)
for (j in 1:n) {
  v = matrix(A[,j],n,1)
  for (i in 1:(j-1)) {
    if (i != 0) {
      R[i,j] = t(matrix(Q[,i],n,1)) %*% matrix(A[,j],n,1)
      v = v - (R[i,j] * matrix(Q[,i],n,1))
    }
  }
  R[j,j] = svd(v)$d
  Q[,j] = v/R[j,j]
}
If you need to wrap the code into a function, you can do so as per your convenience.
You could simply use Hans W. Borchers' pracma package, which provides many Octave/Matlab functions translated into R.
> library(pracma)
> gramSchmidt
function (A, tol = .Machine$double.eps^0.5)
{
    stopifnot(is.numeric(A), is.matrix(A))
    m <- nrow(A)
    n <- ncol(A)
    if (m < n)
        stop("No. of rows of 'A' must be greater or equal no. of colums.")
    Q <- matrix(0, m, n)
    R <- matrix(0, n, n)
    for (k in 1:n) {
        Q[, k] <- A[, k]
        if (k > 1) {
            for (i in 1:(k - 1)) {
                R[i, k] <- t(Q[, i]) %*% Q[, k]
                Q[, k] <- Q[, k] - R[i, k] * Q[, i]
            }
        }
        R[k, k] <- Norm(Q[, k])
        if (abs(R[k, k]) <= tol)
            stop("Matrix 'A' does not have full rank.")
        Q[, k] <- Q[, k]/R[k, k]
    }
    return(list(Q = Q, R = R))
}
<environment: namespace:pracma>
Here is a version very similar to yours, but without the extra variable v. I use the Q matrix directly, so there is no need for drop. Of course, since you have j-1 in the index, you need to add the condition j > 1.
f=function(x){
  m <- nrow(x)
  n <- ncol(x)
  Q <- matrix(0, m, n)
  R <- matrix(0, n, n)
  for (j in 1:n) {
    Q[, j] <- x[, j]
    if (j > 1) {
      for (i in 1:(j - 1)) {
        R[i, j] <- t(Q[, i]) %*% Q[, j]
        Q[, j] <- Q[, j] - R[i, j] * Q[, i]
      }
    }
    R[j, j] <- max(svd(Q[, j])$d)
    Q[, j] <- Q[, j]/R[j, j]
  }
  return(list(Q = Q, R = R))
}
EDIT: adding some benchmarking.
To get a real case, I use the Hilbert matrix from the Matrix package.
library(microbenchmark)
library(Matrix)
A <- as.matrix(Hilbert(100))
microbenchmark(grahm_schimdtR(A),
grahm_schimdtCpp(A),times = 100L)
Unit: milliseconds
                expr       min         lq     median        uq        max neval
   grahm_schimdtR(A) 330.77424 335.648063 337.443273 343.72888 601.793201   100
 grahm_schimdtCpp(A)   1.45445   1.510768   1.615255   1.66816   2.062018   100
As expected, the C++ solution is much faster.
A verbatim implementation in base R of the MATLAB code from the linked PDF, to obtain orthonormal basis vectors with the Gram-Schmidt algorithm, is shown below:
Gram_Schmidt <- function(A) {
  n <- ncol(A)
  Q <- 0*A
  R <- matrix(rep(0, n*n), nrow = n)
  for (j in 1:n) {
    v <- A[,j]
    if (j > 1)  # the first basis vector is included in Q anyway (after normalization)
      for (i in 1:(j-1)) {
        R[i, j] <- t(Q[,i]) %*% A[,j]
        v <- v - R[i,j] * Q[,i]  # subtract the projections on the orthonormal basis vectors constructed so far
      }
    R[j,j] <- sqrt(v %*% v)
    Q[,j] <- v / R[j,j]
  }
  return(list(Q = Q, R = R))
}
Given the matrix A, we obtain the following results as expected:
A <- matrix(c(4,3,-2,1), nrow=2)
Gram_Schmidt(A)
#$Q
# [,1] [,2]
# [1,] 0.8 -0.6
# [2,] 0.6 0.8
#$R
# [,1] [,2]
#[1,] 5 -1
#[2,] 0 2
Using QR decomposition with base R again,
Gram_Schmidt_QR <- function(A) {
  res <- qr(A)
  return(list(Q = qr.Q(res), R = qr.R(res)))
}
Gram_Schmidt_QR(A)
#$Q
# [,1] [,2]
# [1,] 0.8 -0.6
# [2,] 0.6 0.8
#$R
# [,1] [,2]
#[1,] 5 -1
#[2,] 0 2
Also, we could use the R library matlib's implementation; it only outputs the orthonormal Q matrix, though, and not the upper triangular matrix R:
library(matlib)
GramSchmidt(A)
# [,1] [,2]
#[1,] 0.8 -0.6
#[2,] 0.6 0.8
Finally, some performance benchmarking gives the following result:
library(ggplot2)
library(microbenchmark)
autoplot(microbenchmark(Gram_Schmidt(A),
                        Gram_Schmidt_QR(A),
                        GramSchmidt(A), times = 1000L))

R Function for returning ALL factors

My normal search foo is failing me. I'm trying to find an R function that returns ALL of the factors of an integer. There are at least 2 packages with factorize() functions: gmp and conf.design, however these functions return only prime factors. I'd like a function that returns all factors.
Obviously searching for this is made difficult since R has a construct called factors which puts a lot of noise in the search.
To follow up on my comment (thanks to @Ramnath for my typo), the brute force method seems to work reasonably well here on my 64 bit 8 gig machine:
FUN <- function(x) {
  x <- as.integer(x)
  div <- seq_len(abs(x))
  factors <- div[x %% div == 0L]
  factors <- list(neg = -factors, pos = factors)
  return(factors)
}
A few examples:
> FUN(100)
$neg
[1] -1 -2 -4 -5 -10 -20 -25 -50 -100
$pos
[1] 1 2 4 5 10 20 25 50 100
> FUN(-42)
$neg
[1] -1 -2 -3 -6 -7 -14 -21 -42
$pos
[1] 1 2 3 6 7 14 21 42
#and big number
> system.time(FUN(1e8))
user system elapsed
1.95 0.18 2.14
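If brute force is acceptable but a full 1:x scan is too slow, a common refinement (my own sketch, not part of the answer above) is to trial-divide only up to sqrt(x) and recover each co-divisor by integer division:
FUN2 <- function(x) {
  x <- abs(as.integer(x))
  small <- which(x %% seq_len(floor(sqrt(x))) == 0L) # divisors <= sqrt(x)
  facs <- sort(unique(c(small, x %/% small)))        # pair each d with x/d
  list(neg = -facs, pos = facs)
}
FUN2(100)$pos
# [1]   1   2   4   5  10  20  25  50 100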
You can get all factors from the prime factors. gmp calculates these very quickly.
library(gmp)
library(plyr)
get_all_factors <- function(n) {
  prime_factor_tables <- lapply(
    setNames(n, n),
    function(i) {
      if (i == 1) return(data.frame(x = 1L, freq = 1L))
      plyr::count(as.integer(gmp::factorize(i)))
    }
  )
  lapply(
    prime_factor_tables,
    function(pft) {
      powers <- plyr::alply(pft, 1, function(row) row$x ^ seq.int(0L, row$freq))
      power_grid <- do.call(expand.grid, powers)
      sort(unique(apply(power_grid, 1, prod)))
    }
  )
}
get_all_factors(c(1, 7, 60, 663, 2520, 75600, 15876000, 174636000, 403409160000))
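The idea in concrete terms: 60 = 2^2 * 3 * 5, so every divisor has the form 2^a * 3^b * 5^c with a in 0:2, b in 0:1, c in 0:1, and expand.grid enumerates all 3 * 2 * 2 = 12 combinations:
get_all_factors(60)
# $`60`
# [1]  1  2  3  4  5  6 10 12 15 20 30 60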
Update
This is now implemented in the package RcppBigIntAlgos. See this answer for more details.
Original Post
The algorithm has been fully updated and now implements multiple polynomials as well as some clever sieving techniques that eliminates millions of checks. In addition to the original links, this paper along with this post from primo were very helpful for this last stage (many kudos to primo). Primo does a great job of explaining the guts of the QS in a relatively short space and also wrote a pretty amazing algorithm (it will factor the number at the bottom, 38! + 1, in under 2 secs!! Insane!!).
As promised, below is my humble R implementation of the Quadratic Sieve. I have been working on this algorithm sporadically since I promised it in late January. I will not try to explain it fully (unless requested... also, the links below do a very good job) as it is very complicated and hopefully, my function names speak for themselves. This has proved to be one of the most challenging algorithms I have ever attempted to execute as it is demanding both from a programmer's point of view as well as mathematically. I have read countless papers and ultimately, I found these five to be the most helpful (QSieve1, QSieve2, QSieve3, QSieve4, QSieve5).
N.B. This algorithm, as it stands, does not serve very well as a general prime factorization algorithm. If it was optimized further, it would need to be accompanied by a section of code that factors out smaller primes (i.e. less than 10^5 as suggested by this post), then call QuadSieveAll, check to see if these are primes, and if not, call QuadSieveAll on both of these factors, etc. until you are left with all primes (all of these steps are not that difficult). However, the main point of this post is to highlight the heart of the Quadratic Sieve, so the examples below are all semiprimes (even though it will factor most odd numbers not containing a square... Also, I haven't seen an example of the QS that didn't demonstrate a non-semiprime). I know the OP was looking for a method to return all factors and not the prime factorization, but this algorithm (if optimized further) coupled with one of the algorithms above would be a force to be reckoned with as a general factoring algorithm (especially given that the OP was needing something for Project Euler, which usually requires much more than brute force methods). By the way, the MyIntToBit function is a variation of this answer and the PrimeSieve is from a post that @Dontas appeared on a while back (kudos on that as well).
QuadSieveMultiPolysAll <- function(MyN, fudge1=0L, fudge2=0L, LenB=0L) {
### 'MyN' is the number to be factored; 'fudge1' is an arbitrary number
### that is used to determine the size of your prime base for sieving;
### 'fudge2' is used to set a threshold for sieving;
### 'LenB' is the size of the sieving interval. The last three
### arguments are optional (they are determined based off of the
### size of MyN if left blank)
### The first 8 functions are helper functions
PrimeSieve <- function(n) {
n <- as.integer(n)
if (n > 1e9) stop("n too large")
primes <- rep(TRUE, n)
primes[1] <- FALSE
last.prime <- 2L
fsqr <- floor(sqrt(n))
while (last.prime <= fsqr) {
primes[seq.int(last.prime^2, n, last.prime)] <- FALSE
sel <- which(primes[(last.prime + 1):(fsqr + 1)])
if (any(sel)) {
last.prime <- last.prime + min(sel)
} else {
last.prime <- fsqr + 1
}
}
MyPs <- which(primes)
rm(primes)
gc()
MyPs
}
MyIntToBit <- function(x, dig) {
i <- 0L
string <- numeric(dig)
while (x > 0) {
string[dig - i] <- x %% 2L
x <- x %/% 2L
i <- i + 1L
}
string
}
ExpBySquaringBig <- function(x, n, p) {
if (n == 1) {
MyAns <- mod.bigz(x,p)
} else if (mod.bigz(n,2)==0) {
MyAns <- ExpBySquaringBig(mod.bigz(pow.bigz(x,2),p),div.bigz(n,2),p)
} else {
MyAns <- mod.bigz(mul.bigz(x,ExpBySquaringBig(mod.bigz(
pow.bigz(x,2),p), div.bigz(sub.bigz(n,1),2),p)),p)
}
MyAns
}
TonelliShanks <- function(a,p) {
P1 <- sub.bigz(p,1); j <- 0L; s <- P1
while (mod.bigz(s,2)==0L) {s <- s/2; j <- j+1L}
if (j==1L) {
MyAns1 <- ExpBySquaringBig(a,(p+1L)/4,p)
MyAns2 <- mod.bigz(-1 * ExpBySquaringBig(a,(p+1L)/4,p),p)
} else {
n <- 2L
Legendre2 <- ExpBySquaringBig(n,P1/2,p)
while (Legendre2==1L) {n <- n+1L; Legendre2 <- ExpBySquaringBig(n,P1/2,p)}
x <- ExpBySquaringBig(a,(s+1L)/2,p)
b <- ExpBySquaringBig(a,s,p)
g <- ExpBySquaringBig(n,s,p)
r <- j; m <- 1L
Test <- mod.bigz(b,p)
while (!(Test==1L) && !(m==0L)) {
m <- 0L
Test <- mod.bigz(b,p)
while (!(Test==1L)) {m <- m+1L; Test <- ExpBySquaringBig(b,pow.bigz(2,m),p)}
if (!m==0) {
x <- mod.bigz(x * ExpBySquaringBig(g,pow.bigz(2,r-m-1L),p),p)
g <- ExpBySquaringBig(g,pow.bigz(2,r-m),p)
b <- mod.bigz(b*g,p); r <- m
}; Test <- 0L
}; MyAns1 <- x; MyAns2 <- mod.bigz(p-x,p)
}
c(MyAns1, MyAns2)
}
SieveLists <- function(facLim, FBase, vecLen, sieveD, MInt) {
vLen <- ceiling(vecLen/2); SecondHalf <- (vLen+1L):vecLen
MInt1 <- MInt[1:vLen]; MInt2 <- MInt[SecondHalf]
tl <- vector("list",length=facLim)
for (m in 3:facLim) {
st1 <- mod.bigz(MInt1[1],FBase[m])
m1 <- 1L+as.integer(mod.bigz(sieveD[[m]][1] - st1,FBase[m]))
m2 <- 1L+as.integer(mod.bigz(sieveD[[m]][2] - st1,FBase[m]))
sl1 <- seq.int(m1,vLen,FBase[m])
sl2 <- seq.int(m2,vLen,FBase[m])
tl1 <- list(sl1,sl2)
st2 <- mod.bigz(MInt2[1],FBase[m])
m3 <- vLen+1L+as.integer(mod.bigz(sieveD[[m]][1] - st2,FBase[m]))
m4 <- vLen+1L+as.integer(mod.bigz(sieveD[[m]][2] - st2,FBase[m]))
sl3 <- seq.int(m3,vecLen,FBase[m])
sl4 <- seq.int(m4,vecLen,FBase[m])
tl2 <- list(sl3,sl4)
tl[[m]] <- list(tl1,tl2)
}
tl
}
SieverMod <- function(facLim, FBase, vecLen, SD, MInt, FList, LogFB, Lim, myCol) {
MyLogs <- rep(0,nrow(SD))
for (m in 3:facLim) {
MyBool <- rep(FALSE,vecLen)
MyBool[c(FList[[m]][[1]][[1]],FList[[m]][[2]][[1]])] <- TRUE
MyBool[c(FList[[m]][[1]][[2]],FList[[m]][[2]][[2]])] <- TRUE
temp <- which(MyBool)
MyLogs[temp] <- MyLogs[temp] + LogFB[m]
}
MySieve <- which(MyLogs > Lim)
MInt <- MInt[MySieve]; NewSD <- SD[MySieve,]
newLen <- length(MySieve); GoForIT <- FALSE
MyMat <- matrix(integer(0),nrow=newLen,ncol=myCol)
MyMat[which(NewSD[,1L] < 0),1L] <- 1L; MyMat[which(NewSD[,1L] > 0),1L] <- 0L
if ((myCol-1L) - (facLim+1L) > 0L) {MyMat[,((facLim+2L):(myCol-1L))] <- 0L}
if (newLen==1L) {MyMat <- matrix(MyMat,nrow=1,byrow=TRUE)}
if (newLen > 0L) {
GoForIT <- TRUE
for (m in 1:facLim) {
vec <- rep(0L,newLen)
temp <- which((NewSD[,1L]%%FBase[m])==0L)
NewSD[temp,] <- NewSD[temp,]/FBase[m]; vec[temp] <- 1L
test <- temp[which((NewSD[temp,]%%FBase[m])==0L)]
while (length(test)>0L) {
NewSD[test,] <- NewSD[test,]/FBase[m]
vec[test] <- (vec[test]+1L)
test <- test[which((NewSD[test,]%%FBase[m])==0L)]
}
MyMat[,m+1L] <- vec
}
}
list(MyMat,NewSD,MInt,GoForIT)
}
reduceMatrix <- function(mat) {
tempMin <- 0L; n1 <- ncol(mat); n2 <- nrow(mat)
mymax <- 1L
for (i in 1:n1) {
temp <- which(mat[,i]==1L)
t <- which(temp >= mymax)
if (length(temp)>0L && length(t)>0L) {
MyMin <- min(temp[t])
if (!(MyMin==mymax)) {
vec <- mat[MyMin,]
mat[MyMin,] <- mat[mymax,]
mat[mymax,] <- vec
}
t <- t[-1]; temp <- temp[t]
for (j in temp) {mat[j,] <- (mat[j,]+mat[mymax,])%%2L}
mymax <- mymax+1L
}
}
if (mymax<n2) {simpMat <- mat[-(mymax:n2),]} else {simpMat <- mat}
lenSimp <- nrow(simpMat)
if (is.null(lenSimp)) {lenSimp <- 0L}
mycols <- 1:n1
if (lenSimp>1L) {
## "Diagonalizing" Matrix
for (i in 1:lenSimp) {
if (all(simpMat[i,]==0L)) {simpMat <- simpMat[-i,]; next}
if (!simpMat[i,i]==1L) {
t <- min(which(simpMat[i,]==1L))
vec <- simpMat[,i]; tempCol <- mycols[i]
simpMat[,i] <- simpMat[,t]; mycols[i] <- mycols[t]
simpMat[,t] <- vec; mycols[t] <- tempCol
}
}
lenSimp <- nrow(simpMat); MyList <- vector("list",length=n1)
MyFree <- mycols[which((1:n1)>lenSimp)]; for (i in MyFree) {MyList[[i]] <- i}
if (is.null(lenSimp)) {lenSimp <- 0L}
if (lenSimp>1L) {
for (i in lenSimp:1L) {
t <- which(simpMat[i,]==1L)
if (length(t)==1L) {
simpMat[ ,t] <- 0L
MyList[[mycols[i]]] <- 0L
} else {
t1 <- t[t>i]
if (all(t1 > lenSimp)) {
MyList[[mycols[i]]] <- MyList[[mycols[t1[1]]]]
if (length(t1)>1) {
for (j in 2:length(t1)) {MyList[[mycols[i]]] <- c(MyList[[mycols[i]]], MyList[[mycols[t1[j]]]])}
}
}
else {
for (j in t1) {
if (length(MyList[[mycols[i]]])==0L) {MyList[[mycols[i]]] <- MyList[[mycols[j]]]}
else {
e1 <- which(MyList[[mycols[i]]]%in%MyList[[mycols[j]]])
if (length(e1)==0) {
MyList[[mycols[i]]] <- c(MyList[[mycols[i]]],MyList[[mycols[j]]])
} else {
e2 <- which(!MyList[[mycols[j]]]%in%MyList[[mycols[i]]])
MyList[[mycols[i]]] <- MyList[[mycols[i]]][-e1]
if (length(e2)>0L) {MyList[[mycols[i]]] <- c(MyList[[mycols[i]]], MyList[[mycols[j]]][e2])}
}
}
}
}
}
}
TheList <- lapply(MyList, function(x) {if (length(x)==0L) {0} else {x}})
list(TheList,MyFree)
} else {
list(NULL,NULL)
}
} else {
list(NULL,NULL)
}
}
GetFacs <- function(vec1, vec2, n) {
x <- mod.bigz(prod.bigz(vec1),n)
y <- mod.bigz(prod.bigz(vec2),n)
MyAns <- c(gcd.bigz(x-y,n),gcd.bigz(x+y,n))
MyAns[sort.list(asNumeric(MyAns))]
}
SolutionSearch <- function(mymat, M2, n, FB) {
colTest <- which(apply(mymat, 2, sum) == 0)
if (length(colTest) > 0) {solmat <- mymat[ ,-colTest]} else {solmat <- mymat}
if (length(nrow(solmat)) > 0) {
nullMat <- reduceMatrix(t(solmat %% 2L))
listSol <- nullMat[[1]]; freeVar <- nullMat[[2]]; LF <- length(freeVar)
} else {LF <- 0L}
if (LF > 0L) {
for (i in 2:min(10^8,(2^LF + 1L))) {
PosAns <- MyIntToBit(i, LF)
posVec <- sapply(listSol, function(x) {
t <- which(freeVar %in% x)
if (length(t)==0L) {
0
} else {
sum(PosAns[t])%%2L
}
})
ansVec <- which(posVec==1L)
if (length(ansVec)>0) {
if (length(ansVec) > 1L) {
myY <- apply(mymat[ansVec,],2,sum)
} else {
myY <- mymat[ansVec,]
}
if (sum(myY %% 2) < 1) {
myY <- as.integer(myY/2)
myY <- pow.bigz(FB,myY[-1])
temp <- GetFacs(M2[ansVec], myY, n)
if (!(1==temp[1]) && !(1==temp[2])) {
return(temp)
}
}
}
}
}
}
### Below is the main portion of the Quadratic Sieve
BegTime <- Sys.time(); MyNum <- as.bigz(MyN); DigCount <- nchar(as.character(MyN))
P <- PrimeSieve(10^5)
SqrtInt <- .mpfr2bigz(trunc(sqrt(mpfr(MyNum,sizeinbase(MyNum,b=2)+5L))))
if (DigCount < 24) {
DigSize <- c(4,10,15,20,23)
f_Pos <- c(0.5,0.25,0.15,0.1,0.05)
MSize <- c(5000,7000,10000,12500,15000)
if (fudge1==0L) {
LM1 <- lm(f_Pos ~ DigSize)
m1 <- summary(LM1)$coefficients[2,1]
b1 <- summary(LM1)$coefficients[1,1]
fudge1 <- DigCount*m1 + b1
}
if (LenB==0L) {
LM2 <- lm(MSize ~ DigSize)
m2 <- summary(LM2)$coefficients[2,1]
b2 <- summary(LM2)$coefficients[1,1]
LenB <- ceiling(DigCount*m2 + b2)
}
LimB <- trunc(exp((.5+fudge1)*sqrt(log(MyNum)*log(log(MyNum)))))
B <- P[P<=LimB]; B <- B[-1]
facBase <- P[which(sapply(B, function(x) ExpBySquaringBig(MyNum,(x-1)/2,x)==1L))+1L]
LenFBase <- length(facBase)+1L
} else if (DigCount < 67) {
## These values were obtained from "The Multiple Polynomial
## Quadratic Sieve" by Robert D. Silverman
DigSize <- c(24,30,36,42,48,54,60,66)
FBSize <- c(100,200,400,900,1200,2000,3000,4500)
MSize <- c(5,25,25,50,100,250,350,500)
LM1 <- loess(FBSize ~ DigSize)
LM2 <- loess(MSize ~ DigSize)
if (fudge1==0L) {
fudge1 <- -0.4
LimB <- trunc(exp((.5+fudge1)*sqrt(log(MyNum)*log(log(MyNum)))))
myTarget <- ceiling(predict(LM1, DigCount))
while (LimB < myTarget) {
LimB <- trunc(exp((.5+fudge1)*sqrt(log(MyNum)*log(log(MyNum)))))
fudge1 <- fudge1+0.001
}
B <- P[P<=LimB]; B <- B[-1]
facBase <- P[which(sapply(B, function(x) ExpBySquaringBig(MyNum,(x-1)/2,x)==1L))+1L]
LenFBase <- length(facBase)+1L
while (LenFBase < myTarget) {
fudge1 <- fudge1+0.005
LimB <- trunc(exp((.5+fudge1)*sqrt(log(MyNum)*log(log(MyNum)))))
myind <- which(P==max(B))+1L
myset <- tempP <- P[myind]
while (tempP < LimB) {
myind <- myind + 1L
tempP <- P[myind]
myset <- c(myset, tempP)
}
for (p in myset) {
t <- ExpBySquaringBig(MyNum,(p-1)/2,p)==1L
if (t) {facBase <- c(facBase,p)}
}
B <- c(B, myset)
LenFBase <- length(facBase)+1L
}
} else {
LimB <- trunc(exp((.5+fudge1)*sqrt(log(MyNum)*log(log(MyNum)))))
B <- P[P<=LimB]; B <- B[-1]
facBase <- P[which(sapply(B, function(x) ExpBySquaringBig(MyNum,(x-1)/2,x)==1L))+1L]
LenFBase <- length(facBase)+1L
}
if (LenB==0L) {LenB <- 1000*ceiling(predict(LM2, DigCount))}
} else {
return("The number you've entered is currently too big for this algorithm!!")
}
SieveDist <- lapply(facBase, function(x) TonelliShanks(MyNum,x))
SieveDist <- c(1L,SieveDist); SieveDist[[1]] <- c(SieveDist[[1]],1L); facBase <- c(2L,facBase)
Lower <- -LenB; Upper <- LenB; LenB2 <- 2*LenB+1L; MyInterval <- Lower:Upper
M <- MyInterval + SqrtInt ## Set that will be tested
SqrDiff <- matrix(sub.bigz(pow.bigz(M,2),MyNum),nrow=length(M),ncol=1L)
maxM <- max(MyInterval)
LnFB <- log(facBase)
## N.B. primo uses 0.735, as his siever
## is more efficient than the one employed here
if (fudge2==0L) {
if (DigCount < 8) {
fudge2 <- 0
} else if (DigCount < 12) {
fudge2 <- .7
} else if (DigCount < 20) {
fudge2 <- 1.3
} else {
fudge2 <- 1.6
}
}
TheCut <- log10(maxM*sqrt(2*asNumeric(MyNum)))*fudge2
myPrimes <- as.bigz(facBase)
CoolList <- SieveLists(LenFBase, facBase, LenB2, SieveDist, MyInterval)
GetMatrix <- SieverMod(LenFBase, facBase, LenB2, SqrDiff, M, CoolList, LnFB, TheCut, LenFBase+1L)
if (GetMatrix[[4]]) {
newmat <- GetMatrix[[1]]; NewSD <- GetMatrix[[2]]; M <- GetMatrix[[3]]
NonSplitFacs <- which(abs(NewSD[,1L])>1L)
newmat <- newmat[-NonSplitFacs, ]
M <- M[-NonSplitFacs]
lenM <- length(M)
if (class(newmat) == "matrix") {
if (nrow(newmat) > 0) {
PosAns <- SolutionSearch(newmat,M,MyNum,myPrimes)
} else {
PosAns <- vector()
}
} else {
newmat <- matrix(newmat, nrow = 1)
PosAns <- vector()
}
} else {
newmat <- matrix(integer(0),ncol=(LenFBase+1L))
PosAns <- vector()
}
Atemp <- .mpfr2bigz(trunc(sqrt(sqrt(mpfr(2*MyNum))/maxM)))
if (Atemp < max(facBase)) {Atemp <- max(facBase)}; myPoly <- 0L
while (length(PosAns)==0L) {LegTest <- TRUE
while (LegTest) {
Atemp <- nextprime(Atemp)
Legendre <- asNumeric(ExpBySquaringBig(MyNum,(Atemp-1L)/2,Atemp))
if (Legendre == 1) {LegTest <- FALSE}
}
A <- Atemp^2
Btemp <- max(TonelliShanks(MyNum, Atemp))
B2 <- (Btemp + (MyNum - Btemp^2) * inv.bigz(2*Btemp,Atemp))%%A
C <- as.bigz((B2^2 - MyNum)/A)
myPoly <- myPoly + 1L
polySieveD <- lapply(1:LenFBase, function(x) {
AInv <- inv.bigz(A,facBase[x])
asNumeric(c(((SieveDist[[x]][1]-B2)*AInv)%%facBase[x],
((SieveDist[[x]][2]-B2)*AInv)%%facBase[x]))
})
M1 <- A*MyInterval + B2
SqrDiff <- matrix(A*pow.bigz(MyInterval,2) + 2*B2*MyInterval + C,nrow=length(M1),ncol=1L)
CoolList <- SieveLists(LenFBase, facBase, LenB2, polySieveD, MyInterval)
myPrimes <- c(myPrimes,Atemp)
LenP <- length(myPrimes)
GetMatrix <- SieverMod(LenFBase, facBase, LenB2, SqrDiff, M1, CoolList, LnFB, TheCut, LenP+1L)
if (GetMatrix[[4]]) {
n2mat <- GetMatrix[[1]]; N2SD <- GetMatrix[[2]]; M1 <- GetMatrix[[3]]
n2mat[,LenP+1L] <- rep(2L,nrow(N2SD))
if (length(N2SD) > 0) {NonSplitFacs <- which(abs(N2SD[,1L])>1L)} else {NonSplitFacs <- LenB2}
if (length(NonSplitFacs)<2*LenB) {
M1 <- M1[-NonSplitFacs]; lenM1 <- length(M1)
n2mat <- n2mat[-NonSplitFacs,]
if (lenM1==1L) {n2mat <- matrix(n2mat,nrow=1)}
if (ncol(newmat) < (LenP+1L)) {
numCol <- (LenP + 1L) - ncol(newmat)
newmat <- cbind(newmat,matrix(rep(0L,numCol*nrow(newmat)),ncol=numCol))
}
newmat <- rbind(newmat,n2mat); lenM <- lenM+lenM1; M <- c(M,M1)
if (class(newmat) == "matrix") {
if (nrow(newmat) > 0) {
PosAns <- SolutionSearch(newmat,M,MyNum,myPrimes)
}
}
}
}
}
EndTime <- Sys.time()
TotTime <- EndTime - BegTime
print(format(TotTime))
return(PosAns)
}
With Old QS algorithm
> library(gmp)
> library(Rmpfr)
> n3 <- prod(nextprime(urand.bigz(2, 40, 17)))
> system.time(t5 <- QuadSieveAll(n3,0.1,myps))
user system elapsed
164.72 0.77 165.63
> system.time(t6 <- factorize(n3))
user system elapsed
0.1 0.0 0.1
> all(t5[sort.list(asNumeric(t5))]==t6[sort.list(asNumeric(t6))])
[1] TRUE
With New Multi-Polynomial QS algorithm
> QuadSieveMultiPolysAll(n3)
[1] "4.952 secs"
Big Integer ('bigz') object of length 2:
[1] 342086446909 483830424611
> n4 <- prod(nextprime(urand.bigz(2,50,5)))
> QuadSieveMultiPolysAll(n4) ## With old algo, it took over 4 hours
[1] "1.131717 mins"
Big Integer ('bigz') object of length 2:
[1] 166543958545561 880194119571287
> n5 <- as.bigz("94968915845307373740134800567566911") ## 35 digits
> QuadSieveMultiPolysAll(n5)
[1] "3.813167 mins"
Big Integer ('bigz') object of length 2:
[1] 216366620575959221 438925910071081891
> system.time(factorize(n5)) ## It appears we are reaching the limits of factorize
user system elapsed
131.97 0.00 131.98
Side note: The number n5 above is a very interesting number. Check it out here
The Breaking Point!!!!
> n6 <- factorialZ(38) + 1L ## 45 digits
> QuadSieveMultiPolysAll(n6)
[1] "22.79092 mins"
Big Integer ('bigz') object of length 2:
[1] 14029308060317546154181 37280713718589679646221
> system.time(factorize(n6)) ## Shut it down after 2 days of running
Latest Triumph (50 digits)
> n9 <- prod(nextprime(urand.bigz(2,82,42)))
> QuadSieveMultiPolysAll(n9)
[1] "12.9297 hours"
Big Integer ('bigz') object of length 2:
[1] 2128750292720207278230259 4721136619794898059404993
## Based off of some crude test, factorize(n9) would take more than a year.
It should be noted that the QS generally doesn't perform as well as Pollard's rho algorithm on smaller numbers; the power of the QS starts to become apparent as the numbers get larger.
The following approach delivers correct results, even for really big numbers (which should be passed as strings). And it's really fast.
# TEST
# x <- as.bigz("12345678987654321")
# all_divisors(x)
# all_divisors(x*x)
# x <- pow.bigz(2,89)-1
# all_divisors(x)
library(gmp)
options(scipen = 30)

sort_listz <- function(z) {
#==========================
  z <- z[order(as.numeric(z))] # sort(z)
} # function sort_listz

mult_listz <- function(x, y) {
  do.call('c', lapply(y, function(i) i*x))
}

all_divisors <- function(x) {
#==========================
  if (abs(x) <= 1) return(x)
  else {
    factorsz <- as.bigz(factorize(as.bigz(x))) # factorize returns the prime factors,
    # e.g. x = 12345678987654321 factors: 3 3 3 3 37 37 333667 333667
    factorsz <- sort_listz(factorsz) # vector of primes, sorted
    prime_factorsz <- unique(factorsz)
    #prime_ekt <- sapply(prime_factorsz, function(i) length(factorsz[factorsz==i]))
    prime_ekt <- vapply(prime_factorsz, function(i) sum(factorsz==i), integer(1), USE.NAMES=FALSE)
    spz <- vector() # keep all divisors
    all <- 1
    n <- length(prime_factorsz)
    for (i in 1:n) {
      pr <- prime_factorsz[i]
      pe <- prime_ekt[i]
      all <- all*(pe+1) # counts all divisors
      prz <- as.bigz(pr)
      pse <- vector(mode="raw", length=pe+1)
      pse <- c(as.bigz(1), prz)
      if (pe > 1) {
        for (k in 2:pe) {
          prz <- prz*pr
          pse[k+1] <- prz
        } # for k
      } # if pe > 1
      if (i > 1) {
        spz <- mult_listz(spz, pse)
      } else {
        spz <- pse
      } # if i > 1
    } # for n
    spz <- sort_listz(spz)
    return(spz)
  }
} # function all_divisors
#====================================
Refined version, very fast. Code remains simple, readable & clean.
TEST
#Test 4 (big prime factor)
x <- pow.bigz(2,256)+1 # = 1238926361552897 * 93461639715357977769163558199606896584051237541638188580280321
system.time(z2 <- all_divisors(x))
# user system elapsed
# 19.27 1.27 20.56
#Test 5 (big prime factor)
x <- as.bigz("12345678987654321321") # = 3 * 19 * 216590859432531953
system.time(x2 <- all_divisors(x^2))
#user system elapsed
#25.65 0.00 25.67
Major Update
Below is my latest R factorization algorithm. It is way faster and pays homage to the rle function.
Algorithm 3 (Updated)
library(gmp)
MyFactors <- function(MyN) {
  myRle <- function(x1) {
    n1 <- length(x1)
    y1 <- x1[-1L] != x1[-n1]
    i <- c(which(y1), n1)
    list(lengths = diff(c(0L, i)), values = x1[i], uni = sum(y1)+1L)
  }
  if (MyN==1L) return(MyN)
  else {
    pfacs <- myRle(factorize(MyN))
    unip <- pfacs$values
    pv <- pfacs$lengths
    n <- pfacs$uni
    myf <- unip[1L]^(0L:pv[1L])
    if (n > 1L) {
      for (j in 2L:n) {
        myf <- c(myf, do.call(c, lapply(unip[j]^(1L:pv[j]), function(x) x*myf)))
      }
    }
  }
  myf[order(asNumeric(myf))] ## 'order' is faster than 'sort.list'
}
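A quick spot check (divisors of 120; the output shape follows gmp's bigz print method):
MyFactors(as.bigz(120))
# Big Integer ('bigz') object of length 16:
# [1] 1 2 3 4 5 6 8 10 12 15 20 24 30 40 60 120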
Below are the new benchmarks (As Dirk Eddelbuettel says here, "Can't argue with empirics."):
Case 1 (large prime factors)
set.seed(100)
myList <- lapply(1:10^3, function(x) sample(10^6, 10^5))
benchmark(SortList = lapply(myList, function(x) sort.list(x)),
          OrderFun = lapply(myList, function(x) order(x)),
          replications = 3,
          columns = c("test", "replications", "elapsed", "relative"))

      test replications elapsed relative
2 OrderFun            3   59.41    1.000
1 SortList            3   61.52    1.036
## The times are limited by "gmp::factorize" and since it relies on
## pseudo-random numbers, the times can vary (i.e. one pseudo random
## number may lead to a factorization faster than others). With this
## in mind, any differences less than a half of second
## (or so) should be viewed as the same.
x <- pow.bigz(2,256)+1
system.time(z1 <- MyFactors(x))
user system elapsed
14.94 0.00 14.94
system.time(z2 <- all_divisors(x)) ## system.time(factorize(x))
user system elapsed ## user system elapsed
14.94 0.00 14.96 ## 14.94 0.00 14.94
all(z1==z2)
[1] TRUE
x <- as.bigz("12345678987654321321")
system.time(x1 <- MyFactors(x^2))
user system elapsed
20.66 0.02 20.71
system.time(x2 <- all_divisors(x^2)) ## system.time(factorize(x^2))
user system elapsed ## user system elapsed
20.69 0.00 20.69 ## 20.67 0.00 20.67
all(x1==x2)
[1] TRUE
Case 2 (smaller numbers)
set.seed(199)
samp <- sample(10^9, 10^5)
benchmark(JosephDivs = sapply(samp, MyFactors),
          DontasDivs = sapply(samp, all_divisors),
          OldDontas  = sapply(samp, Oldall_divisors),
          replications = 10,
          columns = c("test", "replications", "elapsed", "relative"),
          order = "relative")

        test replications elapsed relative
1 JosephDivs           10  470.31    1.000
2 DontasDivs           10  567.10    1.206  ## with vapply(..., USE.NAMES = FALSE)
3  OldDontas           10  626.19    1.331  ## with sapply
Case 3 (for complete thoroughness)
set.seed(97)
samp <- sample(10^6, 10^4)
benchmark(JosephDivs = sapply(samp, MyFactors),
          DontasDivs = sapply(samp, all_divisors),
          CottonDivs = sapply(samp, get_all_factors),
          ChaseDivs  = sapply(samp, FUN),
          replications = 5,
          columns = c("test", "replications", "elapsed", "relative"),
          order = "relative")

        test replications elapsed relative
1 JosephDivs            5   22.68    1.000
2 DontasDivs            5   27.66    1.220
3 CottonDivs            5  126.66    5.585
4  ChaseDivs            5  554.25   24.438
Original Post
The algorithm by @RichieCotton is a very nice R implementation. The brute force method will only get you so far and fails with large numbers. I have provided three algorithms that meet different needs. The first (the original algorithm I posted in Jan 2015, since updated slightly) is a stand-alone factorization algorithm which offers a combinatorial approach that is efficient, accurate, and can be easily translated into other languages. The second algorithm is more of a sieve that is very fast and extremely useful when you need the factorizations of thousands of numbers quickly. The third is a short (posted above), yet powerful stand-alone algorithm that is superior for any number less than 2^70 (I scrapped almost everything from my original code). I drew inspiration from Richie Cotton's use of the plyr::count function (it inspired me to write my own rle function that has a very similar return to plyr::count), George Dontas's clean way of handling the trivial case (i.e. if (n==1) return(1)), and the solution provided by @Zelazny7 to a question I had regarding bigz vectors.
Algorithm 1 (original)
library(gmp)
factor2 <- function(MyN) {
  if (MyN == 1) return(1L)
  else {
    max_p_div <- factorize(MyN)
    prime_vec <- max_p_div <- max_p_div[sort.list(asNumeric(max_p_div))]
    my_factors <- powers <- as.bigz(vector())
    uni_p <- unique(prime_vec); maxp <- max(prime_vec)
    for (i in 1:length(uni_p)) {
      temp_size <- length(which(prime_vec == uni_p[i]))
      powers <- c(powers, pow.bigz(uni_p[i], 1:temp_size))
    }
    my_factors <- c(as.bigz(1L), my_factors, powers)
    temp_facs <- powers; r <- 2L
    temp_facs2 <- max_p_div2 <- as.bigz(vector())
    while (r <= length(uni_p)) {
      for (i in 1:length(temp_facs)) {
        a <- which(prime_vec > max_p_div[i])
        temp <- mul.bigz(temp_facs[i], powers[a])
        temp_facs2 <- c(temp_facs2, temp)
        max_p_div2 <- c(max_p_div2, prime_vec[a])
      }
      my_sort <- sort.list(asNumeric(max_p_div2))
      temp_facs <- temp_facs2[my_sort]
      max_p_div <- max_p_div2[my_sort]
      my_factors <- c(my_factors, temp_facs)
      temp_facs2 <- max_p_div2 <- as.bigz(vector()); r <- r+1L
    }
  }
  my_factors[sort.list(asNumeric(my_factors))]
}
Algorithm 2 (sieve)
EfficientFactorList <- function(n) {
  MyFactsList <- lapply(1:n, function(x) 1)
  for (j in 2:n) {
    for (r in seq.int(j, n, j)) {
      MyFactsList[[r]] <- c(MyFactsList[[r]], j)
    }
  }
  MyFactsList
}
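Element r of the result holds all factors of r; for example:
facs <- EfficientFactorList(100)
facs[[12]]
# [1]  1  2  3  4  6 12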
It gives all the factors of every number between 1 and 100,000 in less than 2 seconds. To give you an idea of the efficiency of this algorithm, factoring 1 through 100,000 with the brute force method takes close to 3 minutes.
system.time(t1 <- EfficientFactorList(10^5))
user system elapsed
1.04 0.00 1.05
system.time(t2 <- sapply(1:10^5, MyFactors))
user system elapsed
39.21 0.00 39.23
system.time(t3 <- sapply(1:10^5, all_divisors))
user system elapsed
49.03 0.02 49.05
TheTest <- sapply(1:10^5, function(x) all(t2[[x]]==t3[[x]]) && all(asNumeric(t2[[x]])==t1[[x]]) && all(asNumeric(t3[[x]])==t1[[x]]))
all(TheTest)
[1] TRUE
Final Thoughts
@Dontas's original comment about factoring large numbers got me thinking, what about really, really large numbers... like numbers greater than 2^200. You will see that whichever algorithm you choose on this page, they will all take a very long time because most of them rely on gmp::factorize which uses the Pollard-Rho algorithm. From this question, this algorithm is only reasonable for numbers less than 2^70. I am currently working on my own factorize algorithm which will implement the Quadratic Sieve, which should take all of these algorithms to the next level.
A lot has changed in the R language since this question was originally asked. In version 0.6-3 of the numbers package, the function divisors was included that is very useful for getting all of the factors of a number. It will meet the needs of most users, however if you are looking for raw speed or you are working with larger numbers, you will need an alternative method. I have authored two new packages (partially inspired by this question, I might add) that contain highly optimized functions aimed at problems just like this. The first one is RcppAlgos and the other is RcppBigIntAlgos (formerly called bigIntegerAlgos).
RcppAlgos
RcppAlgos contains two functions for obtaining divisors of numbers less than 2^53 - 1: divisorsRcpp (a vectorized function for quickly obtaining the complete factorization of many numbers) & divisorsSieve (quickly generates the complete factorization over a range). First up, we factor many random numbers using divisorsRcpp:
library(gmp) ## for all_divisors by @GeorgeDontas
library(RcppAlgos)
library(numbers)
options(scipen = 999)
set.seed(42)
testSamp <- sample(10^10, 10)
## vectorized so you can pass the entire vector as an argument
testRcpp <- divisorsRcpp(testSamp)
testDontas <- lapply(testSamp, all_divisors)
identical(lapply(testDontas, as.numeric), testRcpp)
[1] TRUE
And now, factor many numbers over a range using divisorsSieve:
system.time(testSieve <- divisorsSieve(10^13, 10^13 + 10^5))
user system elapsed
0.242 0.006 0.247
system.time(testDontasSieve <- lapply((10^13):(10^13 + 10^5), all_divisors))
user system elapsed
47.880 0.132 47.922
identical(lapply(testDontasSieve, asNumeric), testSieve)
[1] TRUE
Both divisorsRcpp and divisorsSieve are nice functions that are flexible and efficient, however they are limited to 2^53 - 1.
RcppBigIntAlgos
The RcppBigIntAlgos package (formerly called bigIntegerAlgos prior to version 0.2.0) links directly to the C library gmp and features divisorsBig which is designed for very large numbers.
library(RcppBigIntAlgos)
## testSamp is defined above... N.B. divisorsBig is not quite as
## efficient as divisorsRcpp. This is so because divisorsRcpp
## can take advantage of more efficient data types.
testBig <- divisorsBig(testSamp)
identical(testDontas, testBig)
[1] TRUE
And here are the benchmarks as defined in my original post (N.B. MyFactors is replaced by divisorsRcpp and divisorsBig).
## Case 2
library(rbenchmark)
set.seed(199)
samp <- sample(10^9, 10^5)
benchmark(RcppAlgos       = divisorsRcpp(samp),
          RcppBigIntAlgos = divisorsBig(samp),
          DontasDivs      = lapply(samp, all_divisors),
          replications = 10,
          columns = c("test", "replications", "elapsed", "relative"),
          order = "relative")

             test replications elapsed relative
1       RcppAlgos           10   5.236    1.000
2 RcppBigIntAlgos           10  12.846    2.453
3      DontasDivs           10 383.742   73.289
## Case 3
set.seed(97)
samp <- sample(10^6, 10^4)
benchmark(RcppAlgos       = divisorsRcpp(samp),
          RcppBigIntAlgos = divisorsBig(samp),
          numbers         = lapply(samp, divisors), ## from the numbers package
          DontasDivs      = lapply(samp, all_divisors),
          CottonDivs      = lapply(samp, get_all_factors),
          ChaseDivs       = lapply(samp, FUN),
          replications = 5,
          columns = c("test", "replications", "elapsed", "relative"),
          order = "relative")

             test replications elapsed relative
1       RcppAlgos            5   0.083    1.000
2 RcppBigIntAlgos            5   0.265    3.193
3         numbers            5  12.913  155.578
4      DontasDivs            5  15.813  190.518
5      CottonDivs            5  60.745  731.867
6       ChaseDivs            5 299.520 3608.675
The next benchmarks demonstrate the true power of the underlying algorithm in the divisorsBig function. The number being factored is a power of 10, so the prime factoring step can almost be completely ignored (e.g. system.time(factorize(pow.bigz(10,30))) registers 0 on my machine). Thus, the difference in timing is due solely to how quickly the prime factors can be combined to produce all factors.
library(microbenchmark)
powTen <- pow.bigz(10, 30)
microbenchmark(divisorsBig(powTen), all_divisors(powTen), unit = "relative")
Unit: relative
expr min lq mean median uq max neval
divisorsBig(powTen) 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 100
all_divisors(powTen) 21.49849 21.27973 21.13085 20.63345 21.18834 20.38772 100
## Negative numbers show an even greater increase in efficiency
negPowTen <- powTen * -1
microbenchmark(divisorsBig(negPowTen), all_divisors(negPowTen), unit = "relative")
Unit: relative
expr min lq mean median uq max neval
divisorsBig(negPowTen) 1.00000 1.0000 1.0000 1.00000 1.00000 1.00000 100
all_divisors(negPowTen) 28.75275 28.1864 27.9335 27.57434 27.91376 30.16962 100
Very Large Numbers
With divisorsBig, obtaining the complete factorization with very large inputs is no problem. The algorithm dynamically adjusts based off of the input and applies different algorithms in different situations. We can also take advantage of multithreading if Lenstra's Elliptic Curve method or the Quadratic Sieve is utilized.
Here are some examples using n5 and n9 defined in this answer.
n5 <- as.bigz("94968915845307373740134800567566911")
system.time(print(divisorsBig(n5)))
Big Integer ('bigz') object of length 4:
[1] 1 216366620575959221 438925910071081891
[4] 94968915845307373740134800567566911
user system elapsed
0.162 0.003 0.164
n9 <- prod(nextprime(urand.bigz(2, 82, 42)))
system.time(print(divisorsBig(n9, nThreads=4)))
Big Integer ('bigz') object of length 4:
[1] 1
[2] 2128750292720207278230259
[3] 4721136619794898059404993
[4] 10050120961360479179164300841596861740399588283187
user system elapsed
1.776 0.011 0.757
Here is an example provided by @Dontas with one large prime and one smaller prime:
x <- pow.bigz(2, 256) + 1
divisorsBig(x, showStats=TRUE, nThreads=8)
Summary Statistics for Factoring:
115792089237316195423570985008687907853269984665640564039457584007913129639937
| Pollard Rho Time |
|--------------------|
| 479ms |
| Lenstra ECM Time | Number of Curves |
|--------------------|--------------------|
| 1s 870ms | 2584 |
| Total Time |
|--------------------|
| 2s 402ms |
Big Integer ('bigz') object of length 4:
[1] 1
[2] 1238926361552897
[3] 93461639715357977769163558199606896584051237541638188580280321
[4] 115792089237316195423570985008687907853269984665640564039457584007913129639937
Compare this to finding the prime factorization using gmp::factorize:
system.time(factorize(x))
user system elapsed
9.199 0.036 9.248
Lastly, here is an example with a large semiprime (N.B. since we know it's a semiprime, we skip the extended Pollard's rho algorithm as well as Lenstra's elliptic curve method).
## https://members.loria.fr/PZimmermann/records/rsa.html
rsa79 <- as.bigz("7293469445285646172092483905177589838606665884410340391954917800303813280275279")
divisorsBig(rsa79, nThreads=8, showStats=TRUE, skipPolRho=T, skipECM=T)
Summary Statistics for Factoring:
7293469445285646172092483905177589838606665884410340391954917800303813280275279
| MPQS Time | Complete | Polynomials | Smooths | Partials |
|--------------------|----------|-------------|------------|------------|
| 2m 49s 174ms | 100 | 91221 | 5651 | 7096 |
| Mat Algebra Time | Mat Dimension |
|--------------------|--------------------|
| 14s 863ms | 12625 x 12747 |
| Total Time |
|--------------------|
| 3m 4s 754ms |
Big Integer ('bigz') object of length 4:
[1] 1
[2] 848184382919488993608481009313734808977
[3] 8598919753958678882400042972133646037727
[4] 7293469445285646172092483905177589838606665884410340391954917800303813280275279
