Lattice Reduction(LLL)算法在C语言中实现

2022-08-08

Lattice Reduction(LLL)算法在C语言中实现

采用了仿照手算的方法编写,使用了 “取出向量——>向量计算——>放回矩阵” 的模式进行计算,欢迎大家交流指正!

参考文献:Factoring Polynomials with Rational Coefficients, 作者 A.K. Lenstra, H.W. Lenstra, Jr. and L. Lovasz.

#include "stdafx.h"
#include <math.h>
#include "stdlib.h"
#include "stdio.h"

double delta = 0.75;//Lovász constant used in the swap condition (conventionally 1/4 < delta < 1; 3/4 is the classic choice)
/* Inner (dot) product of two length-n vectors A and B. */
double intermulti(double *A, double *B, int n)
{
	double acc = 0.0;
	for (int idx = 0; idx != n; ++idx)
		acc += A[idx] * B[idx];
	return acc;
}

/* Component-wise vector sum: OUT = IN1 + IN2, each of length n. */
void add(double *IN1, double *IN2, double *OUT, int n)
{
	int j = 0;
	while (j < n) {
		OUT[j] = IN1[j] + IN2[j];
		++j;
	}
}

/* Component-wise vector difference: OUT = IN1 - IN2, each of length n. */
void minus(double *IN1, double *IN2, double *OUT, int n)
{
	int j = 0;
	while (j < n) {
		OUT[j] = IN1[j] - IN2[j];
		++j;
	}
}

/* Scale a length-n vector by a scalar: OUT = IN1 * IN2. */
void multiply(double IN1, double *IN2, double *OUT, int n)
{
	int j = 0;
	while (j < n) {
		OUT[j] = IN1 * IN2[j];
		++j;
	}
}

/*
 * Gram-Schmidt coefficient mu = <A,B> / <B,B>.
 * NOTE(review): no guard against B being the zero vector (division by 0).
 */
double mui(double *A, double *B, int n)
{
	return intermulti(A, B, n) / intermulti(B, B, n);
}

/*
 * Copy row vector number `num` (1-based) out of the row-major matrix IN
 * (row x col) into OUT.  An index beyond `row` is ignored and OUT is
 * left untouched.
 */
void vetor(int num, double *IN, double *OUT, int col, int row)
{
	if (num > row) return;
	double *src = IN + (num - 1) * col;
	for (int j = 0; j < col; j++)
		OUT[j] = src[j];
}

/*
 * Store vector Ve back into row n (1-based) of the row-major matrix Ar
 * (row x col).
 * BUG FIX: the original ignored `row` entirely, so an out-of-range n
 * wrote past the end of the matrix; reject it instead, mirroring the
 * bound check in vetor().
 */
void Vetor_to_Array(double *Ve, double *Ar, int col, int row, int n)
{
	if (n > row) return;	/* guard against out-of-bounds write */
	for (int i = 0; i < col; i++)
	{
		Ar[(n - 1)*col + i] = Ve[i];
	}
}

/*
 * Gram-Schmidt orthogonalization (no normalization).
 * IN:  row x col matrix of row vectors, row-major, flattened.
 * OUT: matrix of the same shape whose rows are the orthogonalized vectors.
 * Scans the flattened matrix element by element: the first row is copied
 * verbatim; every later element is the input element minus its projections
 * onto all previously orthogonalized rows.
 */
void Schmidt(double *IN, double *OUT, int col, int row)
{
	double k, *a, *b; int q;
	a = (double *)calloc(col, sizeof(double));
	b = (double *)calloc(col, sizeof(double));
	if (!a || !b) { free(a); free(b); return; }
	for (int i = 0; i < col*row; i++)
	{
		if (i/col)	/* rows after the first get projected */
		{
			k = IN[i];
			vetor(i / col + 1, IN, a, col, row);	/* current input row */
			for (q = 0; q < i/col; q++)
			{
				vetor(q + 1, OUT, b, col, row);	/* q-th orthogonalized row */
				/* BUG FIX: row q's element at this column is
				 * OUT[q*col + (i%col)] == b[i%col]; the original read
				 * OUT[q + (i%col)], which is wrong whenever q >= 1
				 * (i.e. for any basis with three or more rows). */
				k -= mui(a, b, col) * b[i % col];
			}
			OUT[i] = k;
		}
		else
		{
			OUT[i] = IN[i];	/* first row is copied unchanged */
		}
	}
	free(a);
	free(b);
}

/*
 * Exchange row vectors A and B (1-based) of the row-major matrix IN.
 * `row` is unused; kept for interface compatibility with the other
 * matrix helpers.
 */
void swap_vetor(double *IN, int col, int row, int A, int B)
{
	double *pa = IN + (A - 1) * col;
	double *pb = IN + (B - 1) * col;
	for (int j = 0; j < col; j++) {
		double t = pa[j];
		pa[j] = pb[j];
		pb[j] = t;
	}
}

/*
 * Test every adjacent basis pair (i, i+1) against the Lovász condition
 *     delta * ||b*_i||^2 <= ||b*_{i+1} + mu_{i+1,i} * b*_i||^2
 * and swap failing pairs in IN.  Returns the number of swaps performed
 * (0 means the basis passed, so the LLL loop can terminate).
 * IN     : basis matrix (row-major, row x col), modified in place.
 * IN_sch : its Gram-Schmidt orthogonalization (read-only here).
 * K      : unused; kept for interface compatibility.
 * NOTE(review): IN_sch is not refreshed after a swap, so later pairs in
 * the same pass are tested against a stale orthogonalization; the caller
 * re-runs Schmidt() before each pass, which appears to be the intended
 * compensation -- confirm against the LLL reference paper.
 */
int Is_Fail_lovaszCondition(double *IN,double *IN_sch, int col, int row,int K)
{												
	double *a, *b, *c, *d; int count = 0;
	a = (double *)calloc(col, sizeof(double));
	b = (double *)calloc(col, sizeof(double));
	c = (double *)calloc(col, sizeof(double));
	d = (double *)calloc(col, sizeof(double));


	for (int i = 1; i < row; i++)
	{
		vetor(i + 1, IN, a, col, row);      /* a = basis row i+1           */
		vetor(i, IN_sch, b, col, row);      /* b = orthogonal row i (b*_i) */
		vetor(i + 1, IN_sch, c, col, row);  /* c = orthogonal row i+1      */
		
		multiply(mui(a, b, col), b, d, col);  /* d = mu_{i+1,i} * b*_i     */
		add(d, c, d, col);                    /* d = b*_{i+1} + mu * b*_i  */
		if (delta*intermulti(b, b, col)>intermulti(d, d, col))//Lovász condition violated: swap the pair
		{
			++count;
			swap_vetor(IN, col, row, i, i + 1);
		}
	}
	free(a);
	free(b);
	free(c);
	free(d);
	return count;
}

/*
 * One size-reduction step: subtract round(mu_{I1,J1}) * b_{J1} from
 * basis row I1 of Ar, where mu is computed against the orthogonalized
 * row J1 of Ar_sch.  Row numbers are 1-based; Ar is updated in place.
 */
void Vetor_Change(double *Ar, double *Ar_sch, int I1, int J1, int col, int row)
{
	double coeff;
	double *bi  = (double *)calloc(col, sizeof(double));
	double *bj  = (double *)calloc(col, sizeof(double));
	double *bjs = (double *)calloc(col, sizeof(double));
	double *tmp = (double *)calloc(col, sizeof(double));

	vetor(I1, Ar, bi, col, row);       /* target basis row            */
	vetor(J1, Ar, bj, col, row);       /* reducing basis row          */
	vetor(J1, Ar_sch, bjs, col, row);  /* its orthogonalized version  */

	coeff = round(mui(bi, bjs, col));  /* nearest integer to mu       */
	multiply(coeff, bj, tmp, col);
	minus(bi, tmp, tmp, col);
	Vetor_to_Array(tmp, Ar, col, row, I1);

	free(bi);
	free(bj);
	free(bjs);
	free(tmp);
}

/*
 * LLL lattice basis reduction (uses the global Lovász constant delta).
 * IN  : row x col matrix of basis row vectors (row-major, flattened);
 *       not modified -- the algorithm works on a private copy.
 * OUT : receives the reduced basis, same layout.
 * Each pass: recompute the Gram-Schmidt basis, size-reduce every row
 * against all earlier rows, then test the Lovász condition; stop when
 * a full pass needs no swap.
 */
void LLL(double *IN, double *OUT, int col, int row)
{
	int i;
	double *a = (double *)calloc(col*row, sizeof(double));
	double *a_sch = (double *)calloc(col*row, sizeof(double));
	if (!a || !a_sch) { free(a); free(a_sch); return; }

	for (i = 0; i < col*row; i++)
	{
		a[i] = IN[i];	/* private working copy of the basis */
	}

	for (;;)
	{
		Schmidt(a, a_sch, col, row);
		/* Size reduction: reduce row i against rows i-1 down to 1.
		 * BUG FIX: the original inner loop ran down to j == 0, making
		 * Vetor_Change fetch vector number 0 -- vetor() is 1-based and
		 * only guards the upper bound, so that read started before the
		 * matrix (undefined behavior).  Row 1 needs no reduction, so
		 * the outer loop starts at 2. */
		for (i = 2; i <= row; i++)
		{
			for (int j = i - 1; j >= 1; j--)
			{
				Vetor_Change(a, a_sch, i, j, col, row);
			}
		}
		if (!Is_Fail_lovaszCondition(a, a_sch, col, row, 0))
		{
			break;	/* no swap performed: basis is LLL-reduced */
		}
	}

	for (i = 0; i < col*row; i++)
	{
		OUT[i] = a[i];
	}
	free(a);
	free(a_sch);
}

/*
 * Demo driver: reduce a small example basis and print the result,
 * one row per line.
 * BUG FIX: the original `_tmain` template left col/row and the array
 * sizes as fill-in blanks (`int col =  ,`), which does not compile,
 * and used the non-portable MSVC entry point.  A concrete 3x3 example
 * basis is supplied; swap in your own row-vector matrix as needed.
 */
int main(void)
{
	int col = 3,//number of columns (vector dimension)
		row = 3;//number of rows (number of basis vectors)
	double input[9] = {
		 1, 1, 1,
		-1, 0, 2,
		 3, 5, 6
	};
	double output[9] = { 0 };
	LLL(input, output, col, row);
	for (int i = 0; i < col*row; i++)
	{
		printf("%f ", output[i]);
		if (i % col == col - 1) printf("\n");
	}
	return 0;
}

本文地址:https://blog.csdn.net/qq_33127317/article/details/107217766

《Lattice Reduction(LLL)算法在C语言中实现.doc》

下载本文的Word格式文档,以方便收藏与打印。