m0_62603116 2022-07-07 18:12 · Acceptance rate: 0%
45 views
Closed

Is anyone here familiar with support vector machines? I have code that needs walking through and debugging; the price is negotiable.

I have a support vector machine code package. I can explain the theory behind it, but I need someone who can program in MATLAB to walk me through the code and fix the errors in it.
The code is below; the price is negotiable.


2 answers

  • m0_62603116 2022-07-07 18:18

    function [label,prediction,cpu_time] = means3vm_iter(y,x,x_test,opt)

    % means3vm_iter implements the means3vm_iter algorithm described in [1].
    %
    % means3vm_iter employs the MATLAB version of libsvm [2] (available at
    % http://www.csie.ntu.edu.tw/~cjlin/libsvm/) and a slightly modified MATLAB
    % version of libsvm (available in the included 'libsvm-mat-2.83-1-[modified]').
    %
    % Syntax
    %
    % [label,prediction,cpu_time] = means3vm_iter(y,x,x_test,opt)
    %
    % Description
    %
    % means3vm_iter takes,
    % y - A nx1 label vector, where y = 1 means positive, y = -1 means negative, y = 0 means unlabeled
    % x - A nxd training data matrix, where d is the instance dimension
    % x_test - A mxd testing data matrix
    % opt - A structure describing the SVM options
    % 1) opt.c1: regularization term for labeled instances (see Eq.(7) in [1]), default setting opt.c1 = 100
    % 2) opt.c2: regularization term for unlabeled instances (see Eq.(7) in [1]), default setting opt.c2 = 0.1
    % 3) opt.gaussian: kernel type, 1 means gaussian kernel, 0 means linear kernel, default setting opt.gaussian = 0
    % 4) opt.gamma: parameter for the gaussian kernel, i.e., k(x,y) = exp(-gamma*||x-y||^2), default setting 1/gamma = average squared distance between patterns
    % 5) opt.maxiter: maximal iteration number, default setting opt.maxiter = 50
    % 6) opt.ep: expected number of positive instances among the unlabeled data, default setting opt.ep = prior estimated from the labeled data
    % and returns,
    % label - A mx1 label vector, the predicted labels of the testing data
    % prediction - A mx1 vector of decision values for the testing data
    % cpu_time - CPU running time
    %
    %
    % [1] Y.-F. Li, J. T. Kwok, and Z.-H. Zhou. Semi-supervised learning using label mean. In: Proceedings of the 26th International Conference on Machine Learning (ICML'09), Montreal, Canada, 2009, pp.633-640.
    % [2] R.-E. Fan, P.-H. Chen, and C.-J. Lin. Working set selection using second order information for training SVM. Journal of Machine Learning Research 6, 1889-1918, 2005.
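    %
    % Example (a minimal usage sketch; the data file and variable names are
    % hypothetical, not part of the package):
    %
    %   load mydata.mat               % x: nxd, y: nx1 in {-1,0,1}, x_test: mxd
    %   opt = struct('gaussian',1,'maxiter',50);
    %   [label,prediction,cpu_time] = means3vm_iter(y,x,x_test,opt);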

    tt = cputime;
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %1. initialization
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%

    % 1.1 initialize the options
    n = size(x,1);
    m = size(x_test,1);
    l_inx = find(y);
    u_inx = find(y==0);
    ln = length(l_inx);
    un = length(u_inx);

    if ~isfield(opt,'c1')
    opt.c1 = 100;
    end
    if ~isfield(opt,'c2')
    opt.c2 = 0.1;
    end
    if ~isfield(opt,'gaussian')
    opt.gaussian = 0;
    end
    if opt.gaussian == 1 && ~isfield(opt,'gamma')
    opt.gamma = n^2/sum(sum(repmat(sum(x.*x,2)',n,1) + repmat(sum(x.*x,2),1,n) ...
        - 2*x*x'));
    end
    if ~isfield(opt,'maxiter')
    opt.maxiter = 50;
    end
    if ~isfield(opt,'ep')
    opt.ep = ceil(length(find(y == 1))/ln*un);
    end

    %1.2 calculate the kernel matrix
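    % (the Gaussian case expands ||xi-xj||^2 = ||xi||^2 + ||xj||^2 - 2*xi'*xj;
    %  the repmat terms build these three pieces, and a small ridge is added
    %  to the diagonal of K for numerical stability)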
    if opt.gaussian == 1
    K = exp(- (repmat(sum(x.*x,2)',n,1) + repmat(sum(x.*x,2),1,n) - 2*x*x') * opt.gamma);
    K = K + 1e-10*eye(size(K,1));
    K_test = exp(- (repmat(sum(x.*x,2),1,m) + repmat(sum(x_test.*x_test,2)',n,1) - 2*x*x_test') * opt.gamma);
    else
    K = x*x';
    K = K + 1e-10*eye(size(K,1));
    K_test = x*x_test';
    end

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % 2. train supervised SVM
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    opt_svm = ['-t 4 -c ' num2str(opt.c1)];
    K_l = K(l_inx,l_inx);
    K_l = [(1:ln)',K_l];
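    % '-t 4' is libsvm's precomputed-kernel mode; it expects the sample serial
    % numbers 1..ln as the first column of the kernel matrix, hence the
    % prepended (1:ln)' column.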
    addpath('libsvm-mat-2.83-1');
    model = svmtrain(y(l_inx),K_l,opt_svm);

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % 3. iteratively predict the unlabeled data and refine svm
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

    % 3.1 predict the label for unlabeled data
    K_t = K(l_inx,u_inx);
    K_t = [(1:un)',K_t'];
    [predict_label, accuracy, dec_values] = svmpredict(ones(un,1),K_t,model);
    if model.Label(1) == -1
    dec_values = -dec_values;
    end
    tmpd = y;
    [val,ix] = sort(dec_values,'descend');
    tmpd(u_inx(ix(1:opt.ep))) = 1;
    tmpd(u_inx(ix(opt.ep+1:un))) = -1;
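    % the opt.ep unlabeled instances with the largest decision values are
    % assigned +1 and the rest -1, enforcing the expected class balance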

    iter = 1; flag = 1;
    while iter < opt.maxiter && flag

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % 3.2 Solve a quadratic programming problem (Eq.(8) in [1]) which is dual form of Eq.(7) in [1]
    % 
    %        max_a  \sum_{i=1}^{ln} a_i - 1/2 (a.*\hat{y})' K^{tmpd} (a.* \hat{y})
    %        s.t.   \sum_{i=1}^{ln} a_i + a_{ln+1} - a_{ln+2} = 0,
    %               a_{ln+1} + a_{ln+2} = c2,
    %               0 <= a_i <= c1, i = 1,..., ln
    %               0 <= a_i <= c2, i = ln+1, ln+2
    % where K^{tmpd} is defined as in Eq.(10) in [1] and \hat{y} = [y(l_inx); 1; -1];
    % Here, we solve this QP problem via standard matlab function 'quadprog'
    %   QUADPROG Quadratic programming. 
    %   X = QUADPROG(H,f,A,b) attempts to solve the quadratic programming
    %   problem:
    %
    %            min 0.5*x'*H*x + f'*x   subject to:  A*x <= b
    %             x
    %
    %   X = QUADPROG(H,f,A,b,Aeq,beq) solves the problem above while
    %   additionally satisfying the equality constraints Aeq*x = beq.
    %
    %   X = QUADPROG(H,f,A,b,Aeq,beq,LB,UB) defines a set of lower and upper
    %   bounds on the design variables, X, so that the solution is in the
    %   range LB <= X <= UB. Use empty matrices for LB and UB if no bounds
    %   exist. Set LB(i) = -Inf if X(i) is unbounded below; set UB(i) = Inf if
    %   X(i) is unbounded above.
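    %
    % In quadprog terms, the maximization above becomes
    %   min_a 0.5*a'*H*a + f'*a
    % with H = K^{tmpd}.*(\hat{y}*\hat{y}') and f = [-ones(ln,1);0;0]; the two
    % equality constraints go into Aeq/beq (built as A and b below) and the
    % box constraints into lb/ub.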
    
    var_n = ln+2;   % variable number 
    con_n = 2;      % constraint number
    
    pos_inx = u_inx(logical(tmpd(u_inx)==1));
    neg_inx = u_inx(logical(tmpd(u_inx)==-1));
    
    % Hessian matrix, i.e., H = K^{tmpd}.* (\hat{y}*\hat{y}')
    H = zeros(var_n);
    H(1:ln,1:ln) = K(l_inx,l_inx); 
    H(ln+1,1:ln) = mean(K(pos_inx,l_inx));
    H(1:ln,ln+1) = H(ln+1,1:ln)';
    H(ln+2,1:ln) = mean(K(neg_inx,l_inx));
    H(1:ln,ln+2) = H(ln+2,1:ln)';
    H(ln+1,ln+1) = mean(mean(K(pos_inx,pos_inx)));
    H(ln+1,ln+2) = mean(mean(K(pos_inx,neg_inx)));
    H(ln+2,ln+1) = H(ln+1,ln+2);
    H(ln+2,ln+2) = mean(mean(K(neg_inx,neg_inx)));      
    tr_y = [y(l_inx);1;-1];
    H = H.* (tr_y*tr_y');
    for i = 1:var_n
        H(i,i) = H(i,i) + 1e-6;
    end
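    % (the 1e-6 diagonal ridge keeps H numerically positive definite, so
    %  quadprog sees a convex QP)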
    
    %linear term f
    f = zeros(var_n,1);
    f(1:ln) = -1;
        
    %Aeq
    A = zeros(con_n, var_n);
    A(1,1:var_n) = [y(l_inx)',1,-1];
    A(2,ln+1:ln+2) = 1;
               
    %b
    b = [0;opt.c2];
        
    %lb & ub
    lb = zeros(var_n,1);
    ub = opt.c1*ones(var_n,1);
    ub(ln+1:ln+2) = opt.c2;
        
    %call quadprog
    tic
    alpha = quadprog(H,f,[],[],A,b,lb,ub);
    toc
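    % alpha is (ln+2)x1: alpha(1:ln) are the multipliers of the labeled
    % points, alpha(ln+1:ln+2) those of the positive/negative mean variables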
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % 3.1 again, predict the label for unlabeled data
    
    % get the indices of the labeled support vectors
    inx = find(1e-6 < alpha(1:ln));  
    
    % calculate the rho, f(x) = w'x + rho
    pre_val = (alpha.*tr_y)'*(H.*(tr_y*tr_y'));
    rho = mean(tr_y(inx)-pre_val(inx)');
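    % (since tr_y is +/-1, H.*(tr_y*tr_y') recovers K^{tmpd} up to the 1e-6
    %  ridge, so pre_val holds the offset-free decision values; rho then
    %  follows from the KKT condition y_i*f(x_i) = 1 at the support vectors)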
    
    % reconstruct the prediction
    new_alpha = zeros(1,n);
    new_alpha(l_inx) = alpha(1:ln)';
    new_alpha(pos_inx) = alpha(ln+1)/length(pos_inx);
    new_alpha(neg_inx) = alpha(ln+2)/length(neg_inx);      
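    % each mean variable's weight is spread uniformly over its class members,
    % matching the label-mean construction in [1]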
    pre_val = (new_alpha.*tmpd')*K + rho;
    
    % predict the label for unlabeled data
    tmptmpd = y;
    [val,ix] = sort(pre_val(u_inx),'descend');
    tmptmpd(u_inx(ix(1:opt.ep))) = 1;
    tmptmpd(u_inx(ix(opt.ep+1:un))) = -1;
    
    % check the stop condition
    if isequal(tmptmpd,tmpd)
        flag = 0;
    else
        tmpd = tmptmpd; iter = iter + 1;
    end  
    

    end

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % 4. train a final SVM and predict the test data
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % preprocess the input for means3vm
    addpath('libsvm-means3vm')
    posinx = find(tmpd == 1); pos = length(posinx);
    neginx = find(tmpd == -1); neg = length(neginx);

    pl = length(find(y(l_inx) == 1));
    nl = length(find(y(l_inx) == -1));
    posinx(1:pl) = find(y == 1);
    posinx(pl+1:pos) = u_inx(logical(tmpd(u_inx)'==1));
    neginx(1:nl) = find(y == -1);
    neginx(nl+1:neg) = u_inx(logical(tmpd(u_inx)' == -1));

    tmpy = tmpd;
    tmpy(1:pos) = 1;
    tmpy(pos+1:pos+neg) = -1;

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % train means3vm
    opt_svm = ['-t 4 -c ' num2str(opt.c1) ' -w2 ' num2str(opt.c2/opt.c1)];
    K_l = K([posinx;neginx],[posinx;neginx]);
    K_l = [(1:n)',K_l];
    model = svmtrain(tmpy,K_l,opt_svm,[pl,nl,pos,neg]);

    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    % prediction
    K_test = [(1:m)',K_test([posinx;neginx],:)'];
    yy_test = zeros(m,1);
    [label, accuracy, prediction] = svmpredict(yy_test,K_test,model);
    if model.Label(1) < 0
    prediction = -prediction;
    end
    cpu_time = cputime - tt;
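
    A practical note: both folders added via addpath ('libsvm-mat-2.83-1' and
    'libsvm-means3vm') are MEX interfaces, so svmtrain/svmpredict must be
    compiled for your MATLAB version before this function will run; the stock
    libsvm MATLAB interface ships a make.m build script for that, and the
    bundled modified copy presumably follows the same layout.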



Question events

  • Question closed by the system on July 15
  • Question created on July 7
