openvino 里的 object detection demo 运行的时候有很多报错,是怎么回事?(悬赏 5C)

代码如下:

/*
// Copyright (c) 2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/
#include <gflags/gflags.h>
#include <algorithm>
#include <functional>
#include <iostream>
#include <fstream>
#include <random>
#include <memory>
#include <chrono>
#include <vector>
#include <string>
#include <utility>

#include <format_reader_ptr.h>
#include <inference_engine.hpp>
#include <ext_list.hpp>

#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <samples/args_helper.hpp>
#include "object_detection_demo.h"
#include "detectionoutput.h"

using namespace InferenceEngine;

bool ParseAndCheckCommandLine(int argc, char *argv[]) {
    // ---------------------------Parsing and validation of input args--------------------------------------
    slog::info << "Parsing input parameters" << slog::endl;

    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);

    // -h / --help: show usage and tell the caller to exit without running inference.
    if (FLAGS_h) {
        showUsage();
        return false;
    }

    // Required-flag validation: any violation aborts startup via logic_error.
    if (FLAGS_ni < 1)
        throw std::logic_error("Parameter -ni should be greater than 0 (default: 1)");

    if (FLAGS_i.empty())
        throw std::logic_error("Parameter -i is not set");

    if (FLAGS_m.empty())
        throw std::logic_error("Parameter -m is not set");

    return true;
}

/**

  • \brief The entry point for the Inference Engine object_detection demo application
  • \file object_detection_demo/main.cpp
  • \example object_detection_demo/main.cpp
    /
    int main(int argc, char *argv[]) {
    try {
    /
    * This demo covers certain topology and cannot be generalized for any object detection one **/
    slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << "\n";

    // ------------------------------ Parsing and validation of input args ---------------------------------
    if (!ParseAndCheckCommandLine(argc, argv)) {
        return 0;
    }
    
    /** This vector stores paths to the processed images **/
    std::vector<std::string> images;
    parseImagesArguments(images);
    if (images.empty()) throw std::logic_error("No suitable images were found");
    // -----------------------------------------------------------------------------------------------------
    
    // --------------------------- 1. Load Plugin for inference engine -------------------------------------
    slog::info << "Loading plugin" << slog::endl;
    InferencePlugin plugin = PluginDispatcher({ FLAGS_pp, "../../../lib/intel64" , "" }).getPluginByDevice(FLAGS_d);
    
    /*If CPU device, load default library with extensions that comes with the product*/
    if (FLAGS_d.find("CPU") != std::string::npos) {
        /**
        * cpu_extensions library is compiled from "extension" folder containing
        * custom MKLDNNPlugin layer implementations. These layers are not supported
        * by mkldnn, but they can be useful for inferencing custom topologies.
        **/
        plugin.AddExtension(std::make_shared<Extensions::Cpu::CpuExtensions>());
    }
    
    if (!FLAGS_l.empty()) {
        // CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
        IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
        plugin.AddExtension(extension_ptr);
        slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
    }
    
    if (!FLAGS_c.empty()) {
        // clDNN Extensions are loaded from an .xml description and OpenCL kernel files
        plugin.SetConfig({ { PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c } });
        slog::info << "GPU Extension loaded: " << FLAGS_c << slog::endl;
    }
    
    /** Setting plugin parameter for per layer metrics **/
    if (FLAGS_pc) {
        plugin.SetConfig({ { PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES } });
    }
    
    /** Printing plugin version **/
    printPluginVersion(plugin, std::cout);
    // -----------------------------------------------------------------------------------------------------
    
    // --------------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
    std::string binFileName = fileNameNoExt(FLAGS_m) + ".bin";
    slog::info << "Loading network files:"
        "\n\t" << FLAGS_m <<
        "\n\t" << binFileName <<
        slog::endl;
    
    CNNNetReader networkReader;
    /** Read network model **/
    networkReader.ReadNetwork(FLAGS_m);
    
    /** Extract model name and load weigts **/
    networkReader.ReadWeights(binFileName);
    CNNNetwork network = networkReader.getNetwork();
    
    Precision p = network.getPrecision();
    // -----------------------------------------------------------------------------------------------------
    
    // --------------------------- 3. Configure input & output ---------------------------------------------
    
    // ------------------------------ Adding DetectionOutput -----------------------------------------------
    
    /**
    * The only meaningful difference between Faster-RCNN and SSD-like topologies is the interpretation
    * of the output data. Faster-RCNN has 2 output layers which (the same format) are presented inside SSD.
    *
    * But SSD has an additional post-processing DetectionOutput layer that simplifies output filtering.
    * So here we are adding 3 Reshapes and the DetectionOutput to the end of Faster-RCNN so it will return the
    * same result as SSD and we can easily parse it.
    */
    
    std::string firstLayerName = network.getInputsInfo().begin()->first;
    
    int inputWidth = network.getInputsInfo().begin()->second->getTensorDesc().getDims()[3];
    int inputHeight = network.getInputsInfo().begin()->second->getTensorDesc().getDims()[2];
    
    DataPtr bbox_pred_reshapeInPort = ((ICNNNetwork&)network).getData(FLAGS_bbox_name.c_str());
    if (bbox_pred_reshapeInPort == nullptr) {
        throw std::logic_error(std::string("Can't find output layer named ") + FLAGS_bbox_name);
    }
    
    SizeVector bbox_pred_reshapeOutDims = {
        bbox_pred_reshapeInPort->getTensorDesc().getDims()[0] *
        bbox_pred_reshapeInPort->getTensorDesc().getDims()[1], 1
    };
    DataPtr rois_reshapeInPort = ((ICNNNetwork&)network).getData(FLAGS_proposal_name.c_str());
    if (rois_reshapeInPort == nullptr) {
        throw std::logic_error(std::string("Can't find output layer named ") + FLAGS_proposal_name);
    }
    
    SizeVector rois_reshapeOutDims = { rois_reshapeInPort->getTensorDesc().getDims()[0] * rois_reshapeInPort->getTensorDesc().getDims()[1], 1 };
    
    DataPtr cls_prob_reshapeInPort = ((ICNNNetwork&)network).getData(FLAGS_prob_name.c_str());
    if (cls_prob_reshapeInPort == nullptr) {
        throw std::logic_error(std::string("Can't find output layer named ") + FLAGS_prob_name);
    }
    
    SizeVector cls_prob_reshapeOutDims = { cls_prob_reshapeInPort->getTensorDesc().getDims()[0] * cls_prob_reshapeInPort->getTensorDesc().getDims()[1], 1 };
    
    /*
    Detection output
    */
    
    int normalized = 0;
    int prior_size = normalized ? 4 : 5;
    int num_priors = rois_reshapeOutDims[0] / prior_size;
    
    // num_classes guessed from the output dims
    if (bbox_pred_reshapeOutDims[0] % (num_priors * 4) != 0) {
        throw std::logic_error("Can't guess number of classes. Something's wrong with output layers dims");
    }
    int num_classes = bbox_pred_reshapeOutDims[0] / (num_priors * 4);
    slog::info << "num_classes guessed: " << num_classes << slog::endl;
    
    LayerParams detectionOutParams;
    detectionOutParams.name = "detection_out";
    detectionOutParams.type = "DetectionOutput";
    detectionOutParams.precision = p;
    CNNLayerPtr detectionOutLayer = CNNLayerPtr(new CNNLayer(detectionOutParams));
    detectionOutLayer->params["background_label_id"] = "0";
    detectionOutLayer->params["code_type"] = "caffe.PriorBoxParameter.CENTER_SIZE";
    detectionOutLayer->params["eta"] = "1.0";
    detectionOutLayer->params["input_height"] = std::to_string(inputHeight);
    detectionOutLayer->params["input_width"] = std::to_string(inputWidth);
    detectionOutLayer->params["keep_top_k"] = "200";
    detectionOutLayer->params["nms_threshold"] = "0.3";
    detectionOutLayer->params["normalized"] = std::to_string(normalized);
    detectionOutLayer->params["num_classes"] = std::to_string(num_classes);
    detectionOutLayer->params["share_location"] = "0";
    detectionOutLayer->params["top_k"] = "400";
    detectionOutLayer->params["variance_encoded_in_target"] = "1";
    detectionOutLayer->params["visualize"] = "False";
    
    detectionOutLayer->insData.push_back(bbox_pred_reshapeInPort);
    detectionOutLayer->insData.push_back(cls_prob_reshapeInPort);
    detectionOutLayer->insData.push_back(rois_reshapeInPort);
    
    SizeVector detectionOutLayerOutDims = { 7, 200, 1, 1 };
    DataPtr detectionOutLayerOutPort = DataPtr(new Data("detection_out", detectionOutLayerOutDims, p,
        TensorDesc::getLayoutByDims(detectionOutLayerOutDims)));
    detectionOutLayerOutPort->creatorLayer = detectionOutLayer;
    detectionOutLayer->outData.push_back(detectionOutLayerOutPort);
    
    DetectionOutputPostProcessor detOutPostProcessor(detectionOutLayer.get());
    
    network.addOutput(FLAGS_bbox_name, 0);
    network.addOutput(FLAGS_prob_name, 0);
    network.addOutput(FLAGS_proposal_name, 0);
    
    // --------------------------- Prepare input blobs -----------------------------------------------------
    slog::info << "Preparing input blobs" << slog::endl;
    
    /** Taking information about all topology inputs **/
    InputsDataMap inputsInfo(network.getInputsInfo());
    
    /** SSD network has one input and one output **/
    if (inputsInfo.size() != 1 && inputsInfo.size() != 2) throw std::logic_error("Demo supports topologies only with 1 or 2 inputs");
    
    std::string imageInputName, imInfoInputName;
    
    InputInfo::Ptr inputInfo = inputsInfo.begin()->second;
    
    SizeVector inputImageDims;
    /** Stores input image **/
    
    /** Iterating over all input blobs **/
    for (auto & item : inputsInfo) {
        /** Working with first input tensor that stores image **/
        if (item.second->getInputData()->getTensorDesc().getDims().size() == 4) {
            imageInputName = item.first;
    
            slog::info << "Batch size is " << std::to_string(networkReader.getNetwork().getBatchSize()) << slog::endl;
    
            /** Creating first input blob **/
            Precision inputPrecision = Precision::U8;
            item.second->setPrecision(inputPrecision);
    
        }
        else if (item.second->getInputData()->getTensorDesc().getDims().size() == 2) {
            imInfoInputName = item.first;
    
            Precision inputPrecision = Precision::FP32;
            item.second->setPrecision(inputPrecision);
            if ((item.second->getTensorDesc().getDims()[1] != 3 && item.second->getTensorDesc().getDims()[1] != 6) ||
                item.second->getTensorDesc().getDims()[0] != 1) {
                throw std::logic_error("Invalid input info. Should be 3 or 6 values length");
            }
        }
    }
    
    // ------------------------------ Prepare output blobs -------------------------------------------------
    slog::info << "Preparing output blobs" << slog::endl;
    
    OutputsDataMap outputsInfo(network.getOutputsInfo());
    
    const int maxProposalCount = detectionOutLayerOutDims[1];
    const int objectSize = detectionOutLayerOutDims[0];
    
    /** Set the precision of output data provided by the user, should be called before load of the network to the plugin **/
    
    outputsInfo[FLAGS_bbox_name]->setPrecision(Precision::FP32);
    outputsInfo[FLAGS_prob_name]->setPrecision(Precision::FP32);
    outputsInfo[FLAGS_proposal_name]->setPrecision(Precision::FP32);
    // -----------------------------------------------------------------------------------------------------
    
    // --------------------------- 4. Loading model to the plugin ------------------------------------------
    slog::info << "Loading model to the plugin" << slog::endl;
    
    ExecutableNetwork executable_network = plugin.LoadNetwork(network, {});
    // -----------------------------------------------------------------------------------------------------
    
    // --------------------------- 5. Create infer request -------------------------------------------------
    InferRequest infer_request = executable_network.CreateInferRequest();
    // -----------------------------------------------------------------------------------------------------
    
    // --------------------------- 6. Prepare input --------------------------------------------------------
    /** Collect images data ptrs **/
    std::vector<std::shared_ptr<unsigned char>> imagesData, originalImagesData;
    std::vector<int> imageWidths, imageHeights;
    for (auto & i : images) {
        FormatReader::ReaderPtr reader(i.c_str());
        if (reader.get() == nullptr) {
            slog::warn << "Image " + i + " cannot be read!" << slog::endl;
            continue;
        }
        /** Store image data **/
        std::shared_ptr<unsigned char> originalData(reader->getData());
        std::shared_ptr<unsigned char> data(reader->getData(inputInfo->getTensorDesc().getDims()[3], inputInfo->getTensorDesc().getDims()[2]));
        if (data.get() != nullptr) {
            originalImagesData.push_back(originalData);
            imagesData.push_back(data);
            imageWidths.push_back(reader->width());
            imageHeights.push_back(reader->height());
        }
    }
    if (imagesData.empty()) throw std::logic_error("Valid input images were not found!");
    
    size_t batchSize = network.getBatchSize();
    slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;
    if (batchSize != imagesData.size()) {
        slog::warn << "Number of images " + std::to_string(imagesData.size()) + \
            " doesn't match batch size " + std::to_string(batchSize) << slog::endl;
        slog::warn << std::to_string(std::min(imagesData.size(), batchSize)) + \
            " images will be processed" << slog::endl;
        batchSize = std::min(batchSize, imagesData.size());
    }
    
    /** Creating input blob **/
    Blob::Ptr imageInput = infer_request.GetBlob(imageInputName);
    
    /** Filling input tensor with images. First b channel, then g and r channels **/
    size_t num_channels = imageInput->getTensorDesc().getDims()[1];
    size_t image_size = imageInput->getTensorDesc().getDims()[3] * imageInput->getTensorDesc().getDims()[2];
    
    unsigned char* data = static_cast<unsigned char*>(imageInput->buffer());
    
    /** Iterate over all input images **/
    for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
        /** Iterate over all pixel in image (b,g,r) **/
        for (size_t pid = 0; pid < image_size; pid++) {
            /** Iterate over all channels **/
            for (size_t ch = 0; ch < num_channels; ++ch) {
                /**          [images stride + channels stride + pixel id ] all in bytes            **/
                data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid*num_channels + ch];
            }
        }
    }
    
    if (imInfoInputName != "") {
        Blob::Ptr input2 = infer_request.GetBlob(imInfoInputName);
        auto imInfoDim = inputsInfo.find(imInfoInputName)->second->getTensorDesc().getDims()[1];
    
        /** Fill input tensor with values **/
        float *p = input2->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
    
        for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
            p[image_id * imInfoDim + 0] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[2]);
            p[image_id * imInfoDim + 1] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[3]);
            for (int k = 2; k < imInfoDim; k++) {
                p[image_id * imInfoDim + k] = 1.0f;  // all scale factors are set to 1.0
            }
        }
    }
    // -----------------------------------------------------------------------------------------------------
    
    // ---------------------------- 7. Do inference --------------------------------------------------------
    slog::info << "Start inference (" << FLAGS_ni << " iterations)" << slog::endl;
    
    typedef std::chrono::high_resolution_clock Time;
    typedef std::chrono::duration<double, std::ratio<1, 1000>> ms;
    typedef std::chrono::duration<float> fsec;
    
    double total = 0.0;
    /** Start inference & calc performance **/
    for (int iter = 0; iter < FLAGS_ni; ++iter) {
        auto t0 = Time::now();
        infer_request.Infer();
        auto t1 = Time::now();
        fsec fs = t1 - t0;
        ms d = std::chrono::duration_cast<ms>(fs);
        total += d.count();
    }
    // -----------------------------------------------------------------------------------------------------
    
    // ---------------------------- 8. Process output ------------------------------------------------------
    slog::info << "Processing output blobs" << slog::endl;
    
    Blob::Ptr bbox_output_blob = infer_request.GetBlob(FLAGS_bbox_name);
    Blob::Ptr prob_output_blob = infer_request.GetBlob(FLAGS_prob_name);
    Blob::Ptr rois_output_blob = infer_request.GetBlob(FLAGS_proposal_name);
    
    std::vector<Blob::Ptr> detOutInBlobs = { bbox_output_blob, prob_output_blob, rois_output_blob };
    
    Blob::Ptr output_blob = std::make_shared<TBlob<float>>(Precision::FP32, Layout::NCHW, detectionOutLayerOutDims);
    output_blob->allocate();
    std::vector<Blob::Ptr> detOutOutBlobs = { output_blob };
    
    detOutPostProcessor.execute(detOutInBlobs, detOutOutBlobs, nullptr);
    
    const float* detection = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output_blob->buffer());
    
    std::vector<std::vector<int> > boxes(batchSize);
    std::vector<std::vector<int> > classes(batchSize);
    
    /* Each detection has image_id that denotes processed image */
    for (int curProposal = 0; curProposal < maxProposalCount; curProposal++) {
        float image_id = detection[curProposal * objectSize + 0];
        float label = detection[curProposal * objectSize + 1];
        float confidence = detection[curProposal * objectSize + 2];
        float xmin = detection[curProposal * objectSize + 3] * imageWidths[image_id];
        float ymin = detection[curProposal * objectSize + 4] * imageHeights[image_id];
        float xmax = detection[curProposal * objectSize + 5] * imageWidths[image_id];
        float ymax = detection[curProposal * objectSize + 6] * imageHeights[image_id];
    
        /* MKLDnn and clDNN have little differente in DetectionOutput layer, so we need this check */
        if (image_id < 0 || confidence == 0) {
            continue;
        }
    
        std::cout << "[" << curProposal << "," << label << "] element, prob = " << confidence <<
            "    (" << xmin << "," << ymin << ")-(" << xmax << "," << ymax << ")" << " batch id : " << image_id;
    
        if (confidence > 0.5) {
            /** Drawing only objects with >50% probability **/
            classes[image_id].push_back(static_cast<int>(label));
            boxes[image_id].push_back(static_cast<int>(xmin));
            boxes[image_id].push_back(static_cast<int>(ymin));
            boxes[image_id].push_back(static_cast<int>(xmax - xmin));
            boxes[image_id].push_back(static_cast<int>(ymax - ymin));
            std::cout << " WILL BE PRINTED!";
        }
        std::cout << std::endl;
    }
    
    for (size_t batch_id = 0; batch_id < batchSize; ++batch_id) {
        addRectangles(originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id], boxes[batch_id], classes[batch_id]);
        const std::string image_path = "out_" + std::to_string(batch_id) + ".bmp";
        if (writeOutputBmp(image_path, originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id])) {
            slog::info << "Image " + image_path + " created!" << slog::endl;
        }
        else {
            throw std::logic_error(std::string("Can't create a file: ") + image_path);
        }
    }
    // -----------------------------------------------------------------------------------------------------
    std::cout << std::endl << "total inference time: " << total << std::endl;
    std::cout << "Average running time of one iteration: " << total / static_cast<double>(FLAGS_ni) << " ms" << std::endl;
    std::cout << std::endl << "Throughput: " << 1000 * static_cast<double>(FLAGS_ni) * batchSize / total << " FPS" << std::endl;
    std::cout << std::endl;
    
    /** Show performace results **/
    if (FLAGS_pc) {
        printPerformanceCounts(infer_request, std::cout);
    }
    

    }
    catch (const std::exception& error) {
    slog::err << error.what() << slog::endl;
    return 1;
    }
    catch (...) {
    slog::err << "Unknown/internal exception happened." << slog::endl;
    return 1;
    }

    slog::info << "Execution successful" << slog::endl;
    return 0;
    }

有如下报错:
严重性	代码	说明	项目	文件	行	禁止显示状态
错误 LNK2019 无法解析的外部符号 CreateFormatReader,该符号在函数 "public: cdecl FormatReader::ReaderPtr::ReaderPtr(char const *)" (??0ReaderPtr@FormatReader@@QEAA@PEBD@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误(活动) 无法引用 函数 "InferenceEngine::make_so_pointer(const std::string &name) [其中 T=InferenceEngine::IExtension]" (已声明 所在行数:164,所属文件:"c:\Users\颜俊毅\Desktop\dldt-2018\inference-engine\include\details\ie_so_pointer.hpp") -- 它是已删除的函数 88999 c:\Users\颜俊毅\Documents\Visual Studio 2015\Projects\88999\88999\7521.cpp 102
错误 LNK2019 无法解析的外部符号 __imp_CreateDefaultAllocator,该符号在函数 "protected: virtual class std::shared_ptr const & __cdecl InferenceEngine::TBlob >::getAllocator(void)const " (?getAllocator@?$TBlob@HU?$enable_if@$00X@std@@@InferenceEngine@@MEBAAEBV?$shared_ptr@VIAllocator@InferenceEngine@@@std@@XZ) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: cdecl InferenceEngine::BlockingDesc::BlockingDesc(class std::vector > const &,class std::vector > const &)" (imp_??0BlockingDesc@InferenceEngine@@QEAA@AEBV?$vector@_KV?$allocator@_K@std@@@std@@0@Z),该符号在函数 "public: cdecl DetectionOutputPostProcessor::DetectionOutputPostProcessor(class InferenceEngine::CNNLayer const *)" (??0DetectionOutputPostProcessor@@QEAA@PEBVCNNLayer@InferenceEngine@@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: virtual cdecl InferenceEngine::BlockingDesc::~BlockingDesc(void)" (imp_??1BlockingDesc@InferenceEngine@@UEAA@XZ),该符号在函数 "public: cdecl DetectionOutputPostProcessor::DetectionOutputPostProcessor(class InferenceEngine::CNNLayer const *)" (??0DetectionOutputPostProcessor@@QEAA@PEBVCNNLayer@InferenceEngine@@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: cdecl InferenceEngine::TensorDesc::TensorDesc(class InferenceEngine::Precision const &,class std::vector >,class InferenceEngine::BlockingDesc const &)" (imp_??0TensorDesc@InferenceEngine@@QEAA@AEBVPrecision@1@V?$vector@_KV?$allocator@_K@std@@@std@@AEBVBlockingDesc@1@@Z),该符号在函数 "public: cdecl DetectionOutputPostProcessor::DetectionOutputPostProcessor(class InferenceEngine::CNNLayer const *)" (??0DetectionOutputPostProcessor@@QEAA@PEBVCNNLayer@InferenceEngine@@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: cdecl InferenceEngine::TensorDesc::TensorDesc(class InferenceEngine::Precision const &,class std::vector >,enum InferenceEngine::Layout)" (imp_??0TensorDesc@InferenceEngine@@QEAA@AEBVPrecision@1@V?$vector@_KV?$allocator@_K@std@@@std@@W4Layout@1@@Z),该符号在函数 "public: cdecl InferenceEngine::Blob::Blob(class InferenceEngine::Precision,enum InferenceEngine::Layout,class std::vector > const &)" (??0Blob@InferenceEngine@@QEAA@VPrecision@1@W4Layout@1@AEBV?$vector@_KV?$allocator@_K@std@@@std@@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: virtual cdecl InferenceEngine::TensorDesc::~TensorDesc(void)" (imp_??1TensorDesc@InferenceEngine@@UEAA@XZ),该符号在函数 "public: cdecl InferenceEngine::Blob::Blob(class InferenceEngine::TensorDesc)" (??0Blob@InferenceEngine@@QEAA@VTensorDesc@1@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: class std::vector > & cdecl InferenceEngine::TensorDesc::getDims(void)" (imp_?getDims@TensorDesc@InferenceEngine@@QEAAAEAV?$vector@_KV?$allocator@_K@std@@@std@@XZ),该符号在函数 "public: virtual void cdecl InferenceEngine::TBlob >::allocate(void)" (?allocate@?$TBlob@HU?$enable_if@$00X@std@@@InferenceEngine@@UEAAXXZ) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: class std::vector > const & cdecl InferenceEngine::TensorDesc::getDims(void)const " (imp_?getDims@TensorDesc@InferenceEngine@@QEBAAEBV?$vector@_KV?$allocator@_K@std@@@std@@XZ),该符号在函数 "public: unsigned int64 __cdecl InferenceEngine::Blob::byteSize(void)const " (?byteSize@Blob@InferenceEngine@@QEBA_KXZ) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: static enum InferenceEngine::Layout cdecl InferenceEngine::TensorDesc::getLayoutByDims(class std::vector >)" (imp_?getLayoutByDims@TensorDesc@InferenceEngine@@SA?AW4Layout@2@V?$vector@_KV?$allocator@_K@std@@@std@@@Z),该符号在函数 main 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "__declspec(dllimport) public: cdecl InferenceEngine::TensorDesc::TensorDesc(class InferenceEngine::TensorDesc const &)" (imp_??0TensorDesc@InferenceEngine@@QEAA@AEBV01@@Z),该符号在函数 "public: cdecl InferenceEngine::TBlob >::TBlob >(class InferenceEngine::TensorDesc const &)" (??0?$TBlob@HU?$enable_if@$00X@std@@@InferenceEngine@@QEAA@AEBVTensorDesc@1@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: cdecl InferenceEngine::Data::Data(class std::basic_string,class std::allocator > const &,class std::vector > const &,class InferenceEngine::Precision,enum InferenceEngine::Layout)" (imp_??0Data@InferenceEngine@@QEAA@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@AEBV?$vector@_KV?$allocator@_K@std@@@3@VPrecision@1@W4Layout@1@@Z),该符号在函数 main 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "__declspec(dllimport) public: class InferenceEngine::TensorDesc const & cdecl InferenceEngine::Data::getTensorDesc(void)const " (imp_?getTensorDesc@Data@InferenceEngine@@QEBAAEBVTensorDesc@2@XZ),该符号在函数 "public: virtual class std::map,class std::allocator >,class std::vector >,struct std::less,class std::allocator > >,class std::allocator,class std::allocator > const ,class std::vector > > > > cdecl InferenceEngine::CNNNetwork::getInputShapes(void)" (?getInputShapes@CNNNetwork@InferenceEngine@@UEAA?AV?$map@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@V?$vector@_KV?$allocator@_K@std@@@2@U?$less@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@@2@V?$allocator@U?$pair@$$CBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@V?$vector@_KV?$allocator@_K@std@@@2@@std@@@2@@std@@XZ) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: void cdecl InferenceEngine::Data::setPrecision(class InferenceEngine::Precision const &)" (imp_?setPrecision@Data@InferenceEngine@@QEAAXAEBVPrecision@2@@Z),该符号在函数 "public: void cdecl InferenceEngine::InputInfo::setPrecision(class InferenceEngine::Precision)" (?setPrecision@InputInfo@InferenceEngine@@QEAAXVPrecision@2@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2019 无法解析的外部符号 "
declspec(dllimport) public: cdecl InferenceEngine::Data::~Data(void)" (imp_??1Data@InferenceEngine@@QEAA@XZ),该符号在函数 "public: void * __cdecl InferenceEngine::Data::scalar deleting destructor'(unsigned int)" (??_GData@InferenceEngine@@QEAAPEAXI@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1
错误 LNK2019 无法解析的外部符号 __imp_findPlugin,该符号在函数 "public: class InferenceEngine::details::SOPointer<class InferenceEngine::IInferencePlugin,class InferenceEngine::details::SharedObjectLoader> __cdecl InferenceEngine::PluginDispatcher::getSuitablePlugin(enum InferenceEngine::TargetDevice)const " (?getSuitablePlugin@PluginDispatcher@InferenceEngine@@QEBA?AV?$SOPointer@VIInferencePlugin@InferenceEngine@@VSharedObjectLoader@details@2@@details@2@W4TargetDevice@2@@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1
错误 LNK2019 无法解析的外部符号 __imp_GetInferenceEngineVersion,该符号在函数 main 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1
错误 LNK2019 无法解析的外部符号 __imp_CreateCNNNetReader,该符号在函数 "public: __cdecl InferenceEngine::CNNNetReader::CNNNetReader(void)" (??0CNNNetReader@InferenceEngine@@QEAA@XZ) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1
错误 LNK2019 无法解析的外部符号 "__declspec(dllimport) public: __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::CpuExtensions(void)" (__imp_??0CpuExtensions@Cpu@Extensions@InferenceEngine@@QEAA@XZ),该符号在函数 "public: __cdecl std::_Ref_count_obj<class InferenceEngine::Extensions::Cpu::CpuExtensions>::_Ref_count_obj<class InferenceEngine::Extensions::Cpu::CpuExtensions><>(void)" (??$?0$$V@?$_Ref_count_obj@VCpuExtensions@Cpu@Extensions@InferenceEngine@@@std@@QEAA@XZ) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1
错误 LNK2019 无法解析的外部符号 "__declspec(dllimport) public: virtual __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::~CpuExtensions(void)" (__imp_??1CpuExtensions@Cpu@Extensions@InferenceEngine@@UEAA@XZ),该符号在函数 "public: virtual void * __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::
scalar deleting destructor'(unsigned int)" (??_GCpuExtensions@Cpu@Extensions@InferenceEngine@@UEAAPEAXI@Z) 中被引用 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2001 无法解析的外部符号 "public: virtual void __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::GetVersion(struct InferenceEngine::Version const * &)const " (?GetVersion@CpuExtensions@Cpu@Extensions@InferenceEngine@@UEBAXAEAPEBUVersion@4@@Z) 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2001 无法解析的外部符号 "public: virtual void __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::Release(void)" (?Release@CpuExtensions@Cpu@Extensions@InferenceEngine@@UEAAXXZ) 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2001 无法解析的外部符号 "public: virtual void __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::SetLogCallback(class InferenceEngine::IErrorListener &)" (?SetLogCallback@CpuExtensions@Cpu@Extensions@InferenceEngine@@UEAAXAEAVIErrorListener@4@@Z) 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2001 无法解析的外部符号 "public: virtual void __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::Unload(void)" (?Unload@CpuExtensions@Cpu@Extensions@InferenceEngine@@UEAAXXZ) 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2001 无法解析的外部符号 "public: virtual enum InferenceEngine::StatusCode __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::getFactoryFor(class InferenceEngine::ILayerImplFactory * &,class InferenceEngine::CNNLayer const *,struct InferenceEngine::ResponseDesc *)" (?getFactoryFor@CpuExtensions@Cpu@Extensions@InferenceEngine@@UEAA?AW4StatusCode@4@AEAPEAVILayerImplFactory@4@PEBVCNNLayer@4@PEAUResponseDesc@4@@Z) 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2001 无法解析的外部符号 "public: virtual enum InferenceEngine::StatusCode __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::getPrimitiveTypes(char * * &,unsigned int &,struct InferenceEngine::ResponseDesc *)" (?getPrimitiveTypes@CpuExtensions@Cpu@Extensions@InferenceEngine@@UEAA?AW4StatusCode@4@AEAPEAPEADAEAIPEAUResponseDesc@4@@Z) 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK2001 无法解析的外部符号 "public: virtual enum InferenceEngine::StatusCode __cdecl InferenceEngine::Extensions::Cpu::CpuExtensions::getShapeInferImpl(class std::shared_ptr &,char const *,struct InferenceEngine::ResponseDesc *)" (?getShapeInferImpl@CpuExtensions@Cpu@Extensions@InferenceEngine@@UEAA?AW4StatusCode@4@AEAV?$shared_ptr@VIShapeInferImpl@InferenceEngine@@@std@@PEBDPEAUResponseDesc@4@@Z) 88999 c:\Users\颜俊毅\documents\visual studio 2015\Projects\88999\88999\7521.obj 1

错误 LNK1120 27 个无法解析的外部命令 88999 c:\users\颜俊毅\documents\visual studio 2015\Projects\88999\x64\Debug\88999.exe 1

yuzying
yuzying 我也遇到这个问题,你解决了吗?
11 个月之前 回复

1个回答

Csdn user default icon
上传中...
上传图片
插入图片
抄袭、复制答案,以达到刷声望分或其他目的的行为,在CSDN问答是严格禁止的,一经发现立刻封号。是时候展现真正的技术了!
其他相关推荐
Tensorflow Object Detection API Demo运行报错求?

![图片说明](https://img-ask.csdn.net/upload/201810/12/1539335033_129004.png) 这个能运行但是在jupyter运行Demo就报下面的错误是怎么回事..... ![图片说明](https://img-ask.csdn.net/upload/201810/12/1539335054_656491.png)

openvino demo 文件运行报错问题。

demo里面的 object detection demo 运行的时候出现错误如下 严重性 代码 说明 项目 文件 行 禁止显示状态 错误 C4996 'std::basic_string<char,std::char_traits<char>,std::allocator<char>>::copy': Call to 'std::basic_string::copy' with parameters that may be unsafe - this call relies on the caller to check that the passed values are correct. To disable this warning, use -D_SCL_SECURE_NO_WARNINGS. See documentation on how to use Visual C++ 'Checked Iterators' 88999 d:\open_model_zoo-2018\demos\extension\ext_list.hpp 56 是怎么回事,求各位老师解答

安装Tensorflow object detection API之后运行model_builder_test.py报错?

``` Traceback (most recent call last): File "G:\python\models\research\object_detection\builders\model_builder_test.py", line 23, in <module> from object_detection.builders import model_builder File "G:\python\models\research\object_detection\builders\model_builder.py", line 20, in <module> from object_detection.builders import anchor_generator_builder File "G:\python\models\research\object_detection\builders\anchor_generator_builder.py", line 22, in <module> from object_detection.protos import anchor_generator_pb2 File "G:\python\models\research\object_detection\protos\anchor_generator_pb2.py", line 29, in <module> dependencies=[object__detection_dot_protos_dot_flexible__grid__anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_grid__anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_multiscale__anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_ssd__anchor__generator__pb2.DESCRIPTOR,]) File "G:\python\python setup\lib\site-packages\google\protobuf\descriptor.py", line 879, in __new__ return _message.default_pool.AddSerializedFile(serialized_pb) TypeError: Couldn't build proto file into descriptor pool! Invalid proto descriptor for file "object_detection/protos/anchor_generator.proto": object_detection/protos/flexible_grid_anchor_generator.proto: Import "object_detection/protos/flexible_grid_anchor_generator.proto" has not been loaded. object_detection/protos/multiscale_anchor_generator.proto: Import "object_detection/protos/multiscale_anchor_generator.proto" has not been loaded. object_detection.protos.AnchorGenerator.multiscale_anchor_generator: "object_detection.protos.MultiscaleAnchorGenerator" seems to be defined in "protos/multiscale_anchor_generator.proto", which is not imported by "object_detection/protos/anchor_generator.proto". To use it here, please add the necessary import. 
object_detection.protos.AnchorGenerator.flexible_grid_anchor_generator: "object_detection.protos.FlexibleGridAnchorGenerator" seems to be defined in "protos/flexible_grid_anchor_generator.proto", which is not imported by "object_detection/protos/anchor_generator.proto". To use it here, please add the necessary import. ``` 网上找了各种方法都没用,有些可能有用的但是不够详细。

关于object detection运行视频检测代码出现报错:ValueError:assignment destination is read-only

我参考博主 withzheng的博客:https://blog.csdn.net/xiaoxiao123jun/article/details/76605928 在视频物体识别的部分中,我用的是Anaconda自带的spyder(python3.6)来运行他给的视频检测代码,出现了如下报错,![图片说明](https://img-ask.csdn.net/upload/201904/20/1555752185_448895.jpg) 具体报错: Moviepy - Building video video1_out.mp4. Moviepy - Writing video video1_out.mp4 t: 7%|▋ | 7/96 [00:40<09:17, 6.26s/it, now=None]Traceback (most recent call last): File "", line 1, in runfile('C:/models-master1/research/object_detection/object_detection_tutorial (1).py', wdir='C:/models-master1/research/object_detection') File "C:\Users\Administrator\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 710, in runfile execfile(filename, namespace) File "C:\Users\Administrator\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 101, in execfile exec(compile(f.read(),filename,'exec'), namespace) File "C:/models-master1/research/object_detection/object_detection_tutorial (1).py", line 273, in white_clip.write_videofile(white_output, audio=False) File "", line 2, in write_videofile File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\decorators.py", line 54, in requires_duration return f(clip, *a, **k) File "", line 2, in write_videofile File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\decorators.py", line 137, in use_clip_fps_by_default return f(clip, *new_a, **new_kw) File "", line 2, in write_videofile File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\decorators.py", line 22, in convert_masks_to_RGB return f(clip, *a, **k) File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\video\VideoClip.py", line 326, in write_videofile logger=logger) File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\video\io\ffmpeg_writer.py", line 216, in ffmpeg_write_video fps=fps, dtype="uint8"): File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\Clip.py", line 475, in iter_frames frame = self.get_frame(t) File "", line 2, in get_frame File 
"C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\decorators.py", line 89, in wrapper return f(*new_a, **new_kw) File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\Clip.py", line 95, in get_frame return self.make_frame(t) File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\Clip.py", line 138, in newclip = self.set_make_frame(lambda t: fun(self.get_frame, t)) File "C:\Users\Administrator\Anaconda3\lib\site-packages\moviepy\video\VideoClip.py", line 511, in return self.fl(lambda gf, t: image_func(gf(t)), apply_to) File "C:/models-master1/research/object_detection/object_detection_tutorial (1).py", line 267, in process_image image_process=detect_objects(image,sess,detection_graph) File "C:/models-master1/research/object_detection/object_detection_tutorial (1).py", line 258, in detect_objects line_thickness=8) File "C:\models-master1\research\object_detection\utils\visualization_utils.py", line 743, in visualize_boxes_and_labels_on_image_array use_normalized_coordinates=use_normalized_coordinates) File "C:\models-master1\research\object_detection\utils\visualization_utils.py", line 129, in draw_bounding_box_on_image_array np.copyto(image, np.array(image_pil)) ValueError: assignment destination is read-only 想问问各位大神有遇到过类似的问题吗。。如何解决?

JSONObject.fromObject,JSONArray.fromObject不执行,也不报错

JSONObject.fromObject,JSONArray.fromObject能输出(进入了jsons),但是执行到JSONObject.fromObject就不执行了,也不报错,请指教,下面有项目的架包。

WIN10环境object_detection api训练时报错:Windows fatal exception: access violation

报错内容: Windows fatal exception: access violation Current thread 0x00000e40 (most recent call first): File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\lib\io\file_io.py", line 84 in _preread_check File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\lib\io\file_io.py", line 122 in read File "C:\ProgramData\Anaconda3\lib\site-packages\object_detection-0.1-py3.7.egg\object_detection\utils\label_map_util.py", line 138 in load_labelmap File "C:\ProgramData\Anaconda3\lib\site-packages\object_detection-0.1-py3.7.egg\object_detection\utils\label_map_util.py", line 169 in get_label_map_dict File "C:\ProgramData\Anaconda3\lib\site-packages\object_detection-0.1-py3.7.egg\object_detection\data_decoders\tf_example_decoder.py", line 64 in __init__ File "C:\ProgramData\Anaconda3\lib\site-packages\object_detection-0.1-py3.7.egg\object_detection\data_decoders\tf_example_decoder.py", line 319 in __init__ File "C:\ProgramData\Anaconda3\lib\site-packages\object_detection-0.1-py3.7.egg\object_detection\builders\dataset_builder.py", line 130 in build File "C:\ProgramData\Anaconda3\lib\site-packages\object_detection-0.1-py3.7.egg\object_detection\inputs.py", line 579 in train_input File "C:\ProgramData\Anaconda3\lib\site-packages\object_detection-0.1-py3.7.egg\object_detection\inputs.py", line 476 in _train_input_fn File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 1116 in _call_input_fn File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 1025 in _get_features_and_labels_from_input_fn File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 1188 in _train_model_default File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 1161 in _train_model File 
"C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py", line 370 in train File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\training.py", line 714 in run_local File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\training.py", line 613 in run File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\training.py", line 473 in train_and_evaluate File ".\object_detection\model_main.py", line 105 in main File "C:\ProgramData\Anaconda3\lib\site-packages\absl\app.py", line 250 in _run_main File "C:\ProgramData\Anaconda3\lib\site-packages\absl\app.py", line 299 in run File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\platform\app.py", line 40 in run File ".\object_detection\model_main.py", line 109 in <module>

Tensorflow object-detection api 报错

我尝试使用ssd_mobilenet_v1模型,报错TypeError: `pred` must be a Tensor, or a Python bool, or 1 or 0. Found instead: None 不知道是什么原因引起的,is_training改成true的方法我已经试过了,没有用

在jupyter notebook上运行tensorflow目标识别官方测试代码object_detection_tutorial.ipynb,每次都是最后一个模块运行时出现“服务器挂了”,如何解决?

在annaconda中创建了tensorflow-gpu的环境,代码可以跑通,没有报错,但是每次到最后一块检测test_image 的时候就服务器挂了。 创建tensorflowcpu环境可以正常跑下来(最后显示那个输出结果),请问是为什么?如何解决呢? 对该环境用代码测试过,pycharm里,可以显示应用的显卡信息,算力等信息,应该是没有问题的。

为什么程序运行过程中提示运行报错

在VS2008调试环境下string提示是一个类类型,不能用做表达式,提示报错。 Partial Class _Default Inherits System.Web.UI.Page Protected Sub TextBox1_TextChanged(ByVal sender As Object, ByVal e As System.EventArgs) Handles txtName.TextChanged End Sub Protected Sub Button1_Click(ByVal sender As Object, ByVal e As System.EventArgs) Handles Button1.Click String strHtml = ""; strHtml += txtName.value + "<br/>"; strHtml += txtStreet.value +"<br/>"; strHtml += txtCity.value +"<br/>"; tdInnerHtml.innerHtml = strHtml; End Sub End Class

提问:测试Tensorflow object detection API,然后就出问题了?

# AttributeError: module 'tensorflow.python.keras' has no attribute 'Model' ![图片说明](https://img-ask.csdn.net/upload/201901/19/1547905831_372525.png) 大神们帮我看看怎么弄

python json解析出现No JSON object could be decoded的报错

源代码:#coding:utf-8 import requests import re import json url="http://www.newrank.cn/public/info/list.js?t=1461063208.68" user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' headers = { 'User-Agent' : user_agent } jscontent=requests.get("http://www.newrank.cn/public/info/list.js?t=1461063208.68",headers=headers).content jsdict=json.loads(jscontent) 错误信息:Traceback (most recent call last): File "D:/Python/JetBrains PyCharm 5.0.4/PyCharm 5.0.4/Myproject/test1/test2.py", line 10, in <module> jsdict=json.loads(jscontent) File "D:\Python\lib\json\__init__.py", line 339, in loads return _default_decoder.decode(s) File "D:\Python\lib\json\decoder.py", line 364, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "D:\Python\lib\json\decoder.py", line 382, in raw_decode raise ValueError("No JSON object could be decoded") ValueError: No JSON object could be decoded 是因为json的bom头问题吗,我也尝试过一些网上的去除bom头的办法,不过都不管用,还望指点,非常头疼,谢谢!

用python3爬链家的网页,遇到了AttributeError: 'str' object has no attribute 'select'的报错?

源码: ``` # -*- coding:utf-8 -*- from urllib import request from bs4 import BeautifulSoup import bs4 def get_data(): url = 'https://nj.lianjia.com/ershoufang/gulou/' headers = { 'User_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36' } req = request.Request(url, headers=headers) response = request.urlopen(req) if response.getcode() == 200: data = response.read() data = str(data, encoding='utf-8') print(data) with open('index.html', mode='w', encoding='utf-8') as f: f.write(data) def parse_data(): with open('index.html', mode='r', encoding='utf-8') as f: html = f.read() bs = BeautifulSoup(html, 'html.parser') # 析HTML文件的时候就是用parser的 divs = bs.find_all(class_='clear LOGCLICKDATA')[0].get_text() for div in divs[0:]: div1 = div.select('.address')[0] print(div1) if __name__ == '__main__': # get_data() parse_data() ``` 报错显示是 div1 = div.select('.address')[0] 这一行出了问题 还请各位帮忙看看是怎么了

爬虫中加入请求头导致报错:'str' object has no attribute 'items'

在写爬虫时为了规避反爬虫机制,加入了Chrome浏览器请求头.但是在 response = request.Request(url,headers=headers) 语句中加入headers=headers,运行程序时就会报错:'str' object has no attribute 'items'.如果只用response = request.Request(url)就不会出现异常而得到正确的爬取结果.求指教,谢谢!!![图片说明](https://img-ask.csdn.net/upload/201910/14/1571064760_958800.png)![图片说明](https://img-ask.csdn.net/upload/201910/14/1571064782_395685.png)

python unittest执行程序在pycharm可以成功运行但使用cmd会报错

如题,在pycharm中执行主程序运行成功: 已连接主机 登录成功 正在发送...... 邮件发送完成~ Process finished with exit code 0 在cmd执行主程序报错: Traceback (most recent call last): File"all_test.py",line 32,in<module> alltestnames = creatsuite() File"all_test.py",line 27,in creatsuite testunit.addTests(test_case) File "C:\Python37\lib\unittest\suite.py",line57,in addTests for test in tests: TypeError:'_FailedTest'object is not iterable 报错的程序代码是这部分: def creatsuite(): testunit = unittest.TestSuite() test_dir = test_suite_dir package_tests = unittest.defaultTestLoader.discover(test_dir, pattern='*.py', top_level_dir=None) for test_suite in package_tests: for test_case in test_suite: testunit.addTests(test_case) print(testunit) return testunit 刚接触接口测试,请问是哪里出了问题呢?因为正在jenkins构建项目执行,把源码放在github后使用windows批处理命令执行,这样进程是停滞的,也结束不了。

IntelliJ IDEA运行HelloWorld报错

本人新手第一次用idea 编译第一个HelloWorld报如下错误 请大神帮我看看怎么解决啊! jdk用的1.8环境变量什么都没问题 在eclipse能跑起来 idea就不行了[图片说明](https://img-ask.csdn.net/upload/201711/12/1510481979_317607.png)

报错'DataFrame' object has no attribute 'convert_objects'

每次运行这段代码都会出现这个报错'DataFrame' object has no attribute 'convert_objects'。是pandas包的问题还是哪方面的问题呢?请问如何解决!万分感谢!! ![图片说明](https://img-ask.csdn.net/upload/201911/24/1574583379_426660.png)

JAVA设置函数参数类型为object,传入int型参数报错

public void insertFirst(Object data)//第一个位置插入节点 { GListNode node = new GListNode(data); if(this.First == null) { this.First = node; this.Last = node; this.First.Lift = this.First.Right = this.Last; this.Last.Lift = this.Last.Right = this.First; } else { node.Right = this.First; this.First = node; } Count++; } /*****************************************/ public GList GraphNode;//存储节点的信息 public Graph() { for(int i = 0;i < 100;i++) { GraphNode.insertFirst(1); } } /***********报错***********************/ 描述 资源 路径 位置 类型 类型 GList 中的方法 insertFirst(Object)对于参数(int)不适用 Graph.java /疏散模拟/src/gList 第 13 行 Java 问题

代码没有报错但是程序一运行就崩溃

``` public class WareShow extends Activity { private String[] bookname = new String[]{"从你的全世界路过","59秒的转动","心理罪","肖申克的救赎","疯狂Android讲义"}; private String[] penname = new String[]{"张嘉佳","周语","雷米","斯蒂芬","李刚"}; private String[] price = new String[]{"32¥","38¥","29¥","56¥","35¥"}; private int[] imageIds = new int[]{R.drawable.bg_01, R.drawable.bg_04, R.drawable.bg_02, R.drawable.bg_05, R.drawable.bg_03}; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.book); List<Map<String,Object>> listItem = new ArrayList<Map<String,Object>>(); for (int i = 0; i < bookname.length; i++) { Map<String, Object> listItem1 = new HashMap<String,Object>(); listItem1.put("book", bookname[i]); listItem1.put("pen", penname[i]); listItem1.put("money", price); listItem1.put("image", imageIds); } SimpleAdapter simpleAdapter = new SimpleAdapter(this,listItem,R.layout.book,new String[]{"book","pen","money","image"},new int[]{R.id.imageIds,R.id.tv1,R.id.tv2,R.id.tv3}); ListView listView = (ListView) findViewById(R.id.lv1); listView.setAdapter(simpleAdapter); } } ``` 这段代码没有报错但是运行时程序崩溃了

python报错 wxpython运行出错

AttributeError: 'module' object has no attribute 'frame'

Windows版YOLOv4目标检测实战:训练自己的数据集

课程演示环境:Windows10; cuda 10.2; cudnn7.6.5; Python3.7; VisualStudio2019; OpenCV3.4 需要学习ubuntu系统上YOLOv4的同学请前往:《YOLOv4目标检测实战:训练自己的数据集》 课程链接:https://edu.csdn.net/course/detail/28745 YOLOv4来了!速度和精度双提升! 与 YOLOv3 相比,新版本的 AP (精度)和 FPS (每秒帧率)分别提高了 10% 和 12%。 YOLO系列是基于深度学习的端到端实时目标检测方法。本课程将手把手地教大家使用labelImg标注和使用YOLOv4训练自己的数据集。课程实战分为两个项目:单目标检测(足球目标检测)和多目标检测(足球和梅西同时检测)。 本课程的YOLOv4使用AlexyAB/darknet,在Windows系统上做项目演示。包括:安装软件环境、安装YOLOv4、标注自己的数据集、整理自己的数据集、修改配置文件、训练自己的数据集、测试训练出的网络模型、性能统计(mAP计算)和先验框聚类分析。还将介绍改善YOLOv4目标检测性能的技巧。 除本课程《Windows版YOLOv4目标检测实战:训练自己的数据集》外,本人将推出有关YOLOv4目标检测的系列课程。请持续关注该系列的其它视频课程,包括: 《Windows版YOLOv4目标检测实战:人脸口罩佩戴检测》 《Windows版YOLOv4目标检测实战:中国交通标志识别》 《Windows版YOLOv4目标检测:原理与源码解析》

2019数学建模历年题目及优秀论文

2019数学建模历年题目及优秀论文 ,好资源与大家分享!!

华为海思数字IC提前批笔试题目,2020届华为海思校园招聘,西南地区

华为海思数字IC提前批笔试题目,2020届华为海思校园招聘,西南地区。单选和多选题华为海思数字IC提前批笔试题目,2020届华为海思校园招聘,西南地区。单选和多选题

Java基础知识面试题(2020最新版)

文章目录Java概述何为编程什么是Javajdk1.5之后的三大版本JVM、JRE和JDK的关系什么是跨平台性?原理是什么Java语言有哪些特点什么是字节码?采用字节码的最大好处是什么什么是Java程序的主类?应用程序和小程序的主类有何不同?Java应用程序与小程序之间有那些差别?Java和C++的区别Oracle JDK 和 OpenJDK 的对比基础语法数据类型Java有哪些数据类型switc...

三个项目玩转深度学习(附1G源码)

从事大数据与人工智能开发与实践约十年,钱老师亲自见证了大数据行业的发展与人工智能的从冷到热。事实证明,计算机技术的发展,算力突破,海量数据,机器人技术等,开启了第四次工业革命的序章。深度学习图像分类一直是人工智能的经典任务,是智慧零售、安防、无人驾驶等机器视觉应用领域的核心技术之一,掌握图像分类技术是机器视觉学习的重中之重。针对现有线上学习的特点与实际需求,我们开发了人工智能案例实战系列课程。打造:以项目案例实践为驱动的课程学习方式,覆盖了智能零售,智慧交通等常见领域,通过基础学习、项目案例实践、社群答疑,三维立体的方式,打造最好的学习效果。

软件测试2小时入门

本课程内容系统、全面、简洁、通俗易懂,通过2个多小时的介绍,让大家对软件测试有个系统的理解和认识,具备基本的软件测试理论基础。 主要内容分为5个部分: 1 软件测试概述,了解测试是什么、测试的对象、原则、流程、方法、模型;&nbsp; 2.常用的黑盒测试用例设计方法及示例演示;&nbsp; 3 常用白盒测试用例设计方法及示例演示;&nbsp; 4.自动化测试优缺点、使用范围及示例‘;&nbsp; 5.测试经验谈。

Python数据分析与挖掘

92讲视频课+16大项目实战+源码+¥800元课程礼包+讲师社群1V1答疑+社群闭门分享会=99元 &nbsp; 为什么学习数据分析? &nbsp; &nbsp; &nbsp; 人工智能、大数据时代有什么技能是可以运用在各种行业的?数据分析就是。 &nbsp; &nbsp; &nbsp; 从海量数据中获得别人看不见的信息,创业者可以通过数据分析来优化产品,营销人员可以通过数据分析改进营销策略,产品经理可以通过数据分析洞察用户习惯,金融从业者可以通过数据分析规避投资风险,程序员可以通过数据分析进一步挖掘出数据价值,它和编程一样,本质上也是一个工具,通过数据来对现实事物进行分析和识别的能力。不管你从事什么行业,掌握了数据分析能力,往往在其岗位上更有竞争力。 &nbsp;&nbsp; 本课程共包含五大模块: 一、先导篇: 通过分析数据分析师的一天,让学员了解全面了解成为一个数据分析师的所有必修功法,对数据分析师不在迷惑。 &nbsp; 二、基础篇: 围绕Python基础语法介绍、数据预处理、数据可视化以及数据分析与挖掘......这些核心技能模块展开,帮助你快速而全面的掌握和了解成为一个数据分析师的所有必修功法。 &nbsp; 三、数据采集篇: 通过网络爬虫实战解决数据分析的必经之路:数据从何来的问题,讲解常见的爬虫套路并利用三大实战帮助学员扎实数据采集能力,避免没有数据可分析的尴尬。 &nbsp; 四、分析工具篇: 讲解数据分析避不开的科学计算库Numpy、数据分析工具Pandas及常见可视化工具Matplotlib。 &nbsp; 五、算法篇: 算法是数据分析的精华,课程精选10大算法,包括分类、聚类、预测3大类型,每个算法都从原理和案例两个角度学习,让你不仅能用起来,了解原理,还能知道为什么这么做。

【大总结2】大学两年,写了这篇几十万字的干货总结

本文十天后设置为粉丝可见,喜欢的提前关注 不要白嫖请点赞 不要白嫖请点赞 不要白嫖请点赞 文中提到的书我都有电子版,可以评论邮箱发给你。 文中提到的书我都有电子版,可以评论邮箱发给你。 文中提到的书我都有电子版,可以评论邮箱发给你。 本篇文章应该算是Java后端开发技术栈的,但是大部分是基础知识,所以我觉得对任何方向都是有用的。 1、数据结构 数据结构是计算机存储、...

阿里巴巴高级面试题(首发、高频136道、含答案)

整理的136道阿里的Java面试题,都来挑战一下,看看自己有多厉害。下面题目都带超详细的解答,详情见底部。 java基础 Arrays.sort实现原理和Collection实现原理 foreach和while的区别(编译之后) 线程池的种类,区别和使用场景 分析线程池的实现原理和线程的调度过程 线程池如何调优 线程池的最大线程数目根据什么确定 动态代理的几种方式 HashMap的并发问题 了解LinkedHashMap的应用吗 反射的原理,反射创建类实例的三种方式是什么? clon

Java进阶高手课-核心篇

<p> <br> </p> <p> Java进阶的必经之路!<span></span> </p> <p> <br> </p> <p> <b>【超实用课程内容】</b><b></b> </p> <p> 本课程囊括了<span>Java</span>语言进阶的核心知识点,以真实场景项目实战为导向,循序渐进,深入浅出的了解Java并发编程、JVM虚拟机、网络编程和MySQL应用,讲解<span>Java</span>这门使用广泛的编程语言,助你能够游刃有余地游走在这些技术之中。<span> </span> </p> <p> <br> </p> <p> 套餐中一共包含<span>4</span>门<span>Java</span>进阶必学的核心知识(共<span>57</span>讲) </p> <p> 课程<span>1</span>:《<span>Java</span>进阶高手课<span>-</span>并发编程透彻理解》 </p> <p> 课程<span>2</span>:《<span>Java</span>进阶高手课<span>-</span>深入<span>JVM</span>虚拟机》 </p> <p> 课程<span>3</span>:《<span>Java</span>进阶高手课<span>-</span>深入浅出<span>Java</span>网络编程》 </p> <p> 课程<span>4</span>:《<span>Java</span>进阶高手课<span>-</span>必知必会<span>MySQL</span>》 </p> <p> <br> </p> <p> <strong>【</strong><strong>哪些人适合学习这门课程?</strong><strong>】</strong><strong></strong> </p> <p> 1)大学生,平时只接触了语言基础,并未学习深入语言内核; </p> <p> 2)对<span>Java</span>掌握程度薄弱的人,课程可以让你更好的理解<span>Java</span>语言原理及应用 </p> <p> 3)想修炼更好的<span>Java</span>内功,工作中遇到<span>Bug</span>可以游刃有余 </p> <p> 4)被面试官打破沙锅问到底的问题问到怀疑人生的应聘者 </p> <p> <br> </p> <p> <strong>【</strong><strong>你能收获到什么?</strong><strong>】</strong> </p> <p> 1.基础再提高,针对<span>Java</span>核心知识点学透,用对<span> </span> </p> <p> 2.能力再提高,日常工作中的代码换新貌,不怕问题<span> </span> </p> <p> 3.面试再加分,巴不得面试官打破沙锅问到底,竞争力<span>MAX</span> </p> <p> <br> <strong>【课程如何观看?】</strong> </p> <p> 1、登录<span>CSDN</span>学院<span> APP </span>在我的课程中进行学习; </p> <p> 2、移动端:<span>CSDN </span>学院<span>APP</span>(注意不是<span>CSDN APP</span>哦) </p> <p> 本课程为录播课,课程<span>2</span>年有效观看时长 </p> <p> <br> </p> <p class="ql-long-24357476"> <strong>【</strong><strong>资料开放</strong><strong>】</strong><strong></strong> </p> <p class="ql-long-24357476"> 课件、课程案例代码完全开放给你,你可以根据所学知识,自行修改、优化 </p> <p class="ql-long-24357476"> 下载方式:电脑登录课程观看页面,点击右下方课程资料、代码、课件等打包下载 </p> <p class="ql-long-24357476"> <img src="https://img-bss.csdn.net/202004200153008539.png" alt=""> </p> <p> <br> </p>

LINGO20200701.zip

2020第三届河北省研究生建模竞赛B题,利用lingo进行求解的代码。lingo在处理这类涉及下标的约束条件是个难点,希望我的代码能够提供一些帮助

oracle学习第三天

怎么建表? 根据需求建ER图 根据ER图和三范式建表 SQL structured query language 结构化查询语言 数据库执行语句 数据定义语言 (Data Definition Language, DDL) 是SQL语言集中负责数据结构定义与数据库对象定义的语言,由CREATE、ALTER与DROP三个语法所组成 数据控制语言(DCL:Data Control Language)是用来设置或者更改数据库用户或角色权限的语句,这些语句包括GRANT、REVOKE等语句 插入语句

2020联发科笔试题(IC方向)b.docx

文档位联发科公司的数字IC岗位笔试题,自己回忆版本,跟前一个A卷一起是完整版

使用TensorFlow+keras快速构建图像分类模型

课程分为两条主线: 1&nbsp;从Tensorflow的基础知识开始,全面介绍Tensorflow和Keras相关内容。通过大量实战,掌握Tensorflow和Keras经常用到的各种建模方式,参数优化方法,自定义参数和模型的手段,以及对训练结果评估与分析的技巧。 2&nbsp;从机器学习基础算法开始,然后进入到图像分类领域,使用MNIST手写数据集和CIFAR10图像数据集,从简单神经网络到深度神经网络,再到卷积神经网络,最终完成复杂模型:残差网络的搭建。完成这条主线,学员将可以自如地使用机器学习的手段来达到图像分类的目的。

点进来看看,为什么要学习【数据结构与算法】? 该如何学习?

【数据结构与算法】应该是大学计算机专业必修的一门课,为什么这门课会被列入到必修课的行列当中呢?因为对于每一个程序员来说,在以后的工作中不免要面对一些复杂的业务逻辑,同时要写对应的代码来实现这个复杂的业务。当然了,有很多种方式都可以实现同一个业务逻辑,但每种方式的代码运行效率可能不一样,我们接下来通过一个简单的例子感受一下。 例子: 假如你们班级里有一万个人,并且每个人的姓名和身份信息都存储在一个

《三天给你聊清楚redis》第2天看看redis怎么被搞出来的(22036字)

后端需要知道的关于redis的事,基本都在这里了。 此文后续会改为粉丝可见,所以喜欢的请提前关注。 你的点赞和评论是我创作的最大动力,谢谢。 3、单机实现 3.1、数据库概述 redis服务器将所有数据库都保存在redis/redisServer中,数组db存放所有数据库,每一项是一个redisdb结构。dbnum代表数据库数量。 客户端有一个指针指向当前数据库,可以切换,也就是移动指

中国全国省地县乡村五级地标kmz(很准)

中国全国省地县乡村五级地标kmz(很准),我也不知道写啥了,反正挺好用的

R语言入门基础

本课程旨在帮助学习者快速入门R语言: 课程系统详细地介绍了使用R语言进行数据处理的基本思路和方法。 课程能够帮助初学者快速入门数据处理。 课程通过大量的案例详细地介绍了如何使用R语言进行数据分析和处理 课程操作实际案例教学,通过编写代码演示R语言的基本使用方法和技巧

玩转Linux:常用命令实例指南

人工智能、物联网、大数据时代,Linux正有着一统天下的趋势,几乎每个程序员岗位,都要求掌握Linux。本课程零基础也能轻松入门。 本课程以简洁易懂的语言手把手教你系统掌握日常所需的Linux知识,每个知识点都会配合案例实战让你融汇贯通。课程通俗易懂,简洁流畅,适合0基础以及对Linux掌握不熟练的人学习; 【限时福利】 1)购课后按提示添加小助手,进答疑群,还可获得价值300元的编程大礼包! 2)本月购买此套餐加入老师答疑交流群,可参加老师的免费分享活动,学习最新技术项目经验。 --------------------------------------------------------------- 29元=掌握Linux必修知识+社群答疑+讲师社群分享会+700元编程礼包。 &nbsp;

利用 Python 爬取了 13966 条运维招聘信息,我得出了哪些结论?

作者:JackTian、黄伟呢 公众号:杰哥的IT之旅,后台回复:「运维」可获取本文完整数据 大家好,我是 JackTian。 我经常会收到读者关于一系列咨询运维方面的事情,比如:杰哥,运维到底是做什么的呀?运维的薪资水平/ 待遇怎么样呢?杰哥帮忙看下这个岗位的招聘需要对于小白来说,能否胜任的了呢?等等。 这里,我把之前写的《一篇文章带你解读从初级运维工程师到资深运维专家的学习路线》,本文从初级 / 中级 / 高级运维工程师以及到资深方向逐步展开给大家汇总了一些各阶段所具备的技能,仅供学习路线参考,如有.

2018年数学建模国赛A题matlab代码及注释.rar

2018年数学建模国赛A题matlab代码及注释,自己做题编写,实测有效,可以运行

微信公众平台开发入门

本套课程的设计完全是为初学者量身打造,课程内容由浅入深,课程讲解通俗易懂,代码实现简洁清晰。通过本课程的学习,学员能够入门微信公众平台开发,能够胜任企业级的订阅号、服务号、企业号的应用开发工作。 通过本课程的学习,学员能够对微信公众平台有一个清晰的、系统性的认识。例如,公众号是什么,它有什么特点,它能做什么,怎么开发公众号。 其次,通过本课程的学习,学员能够掌握微信公众平台开发的方法、技术和应用实现。例如,开发者文档怎么看,开发环境怎么搭建,基本的消息交互如何实现,常用的方法技巧有哪些,真实应用怎么开发。

实用主义学Python(小白也容易上手的Python实用案例)

3折秒杀! 系统掌握Python核心语法16点,轻松应对工作中80%以上的Python使用场景! 69元=72讲+源码+社群答疑+讲师社群分享会&nbsp; 【哪些人适合学习这门课程?】 1)大学生,平时只学习了Python理论,并未接触Python实战问题; 2)对Python实用技能掌握薄弱的人,自动化、爬虫、数据分析能让你快速提高工作效率; 3)想学习新技术,如:人工智能、机器学习、深度学习等,这门课程是你的必修课程; 4)想修炼更好的编程内功,优秀的工程师肯定不能只会一门语言,Python语言功能强大、使用高效、简单易学。 【超实用技能】 从零开始 自动生成工作周报 职场升级 豆瓣电影数据爬取 实用案例 奥运冠军数据分析 自动化办公:通过Python自动化分析Excel数据并自动操作Word文档,最终获得一份基于Excel表格的数据分析报告。 豆瓣电影爬虫:通过Python自动爬取豆瓣电影信息并将电影图片保存到本地。 奥运会数据分析实战 简介:通过Python分析120年间奥运会的数据,从不同角度入手分析,从而得出一些有趣的结论。 【超人气老师】 二两 中国人工智能协会高级会员 生成对抗神经网络研究者 《深入浅出生成对抗网络:原理剖析与TensorFlow实现》一书作者 阿里云大学云学院导师 前大型游戏公司后端工程师 【超丰富实用案例】 0)图片背景去除案例 1)自动生成工作周报案例 2)豆瓣电影数据爬取案例 3)奥运会数据分析案例 4)自动处理邮件案例 5)github信息爬取/更新提醒案例 6)B站百大UP信息爬取与分析案例 7)构建自己的论文网站案例

赵强老师:大数据从入门到精通(套餐)

本系列课程将基于RedHat Linux 7.4版本、Hadoop 2.7.3、Spark 2 版本全面介绍大数据的整体内容,让学员深入理解并掌握运行机制和原理,从而进一步掌握大数据的相关内容。

Python+OpenCV计算机视觉

Python+OpenCV计算机视觉系统全面的介绍。

2020联发科笔试题(IC方向)a.docx

文档为自己回忆 准确度高;2019年2020届笔试题;文档分上下两个章节,

Python代码实现飞机大战

文章目录经典飞机大战一.游戏设定二.我方飞机三.敌方飞机四.发射子弹五.发放补给包六.主模块 经典飞机大战 源代码以及素材资料(图片,音频)可从下面的github中下载: 飞机大战源代码以及素材资料github项目地址链接 ————————————————————————————————————————————————————————— 不知道大家有没有打过飞机,喜不喜欢打飞机。当我第一次接触这个东西的时候,我的内心是被震撼到的。第一次接触打飞机的时候作者本人是身心愉悦的,因为周边的朋友都在打飞机, 每

玩转 Spring 全家桶

<p> 新课上新福利:本套餐共包含 4 大课程,原价 ¥199,今日立减¥100,到手仅需¥99! </p> <p> <br> </p> <p class="MsoNormal" align="left"> <b>为什么每一个程序员都需要掌握</b><b> Java Spring </b><b>全家桶?</b><b><br> </b><b></b> </p> <p class="MsoNormal" align="left"> 1. Spring在如今Java的应用市场以及就业市场中,都成为炙手可热的部分。面试出镜率之高,市场应用之广,让其成为Java开发人员的必备技能之一,唯有系统的掌握Spring全家桶的思想、设计、以及实现,才可以让开发过程变得更容易、更高效,以适应当前市场的快速发展。 </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <b>本套餐内容介绍:</b><b></b> </p> <p class="MsoNormal" align="left"> 本课程所有内容基于最新的Spring  Boot 2.2.5.RELEASE 版本,和Spring Cloud Hoxton.SR3 版本,其中大量的独家案例,以及本人多年的经验讲解。 </p> <p class="MsoNormal" align="left"> 课程循序渐进,内容中有Spring的核心基础内容,也有高级的进阶内容。 </p> <p class="MsoNormal" align="left"> 内容分为五大部分,分别是: </p> <p class="MsoNormal" align="left"> <b>初识篇</b><b>Spring Framework</b>:主要讲解Spring的核心内容IOC及AOP。本篇对Spring的基本概念进行大致的讲解,但主要的技术点皆以案例为主,同时会对项目中都会用到的日志进行讲解及案例说明。 </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <b>基础篇</b><b>Spring MVC</b><b>:</b>主要以案例的方式讲解Spring的前端部分,比如表单、验证、国际化、主题、文件上传等,以及高阶内容过滤器、拦截器、多种异常处理方法 ,以及日志Log4j2及SLF4J的使用。最后,会结合Spring、Spring MVC及MyBatis(即SSM)进行产品的增删改查的案例讲解。 </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <b>核心篇</b><b>Spring Boot</b><b>:</b>主要以案例的方式对如何使用Spring Boot开发进行讲解,包括热部署,扩展的启动行为,整合JSON、Thymeleaf、Freemarker、JSP的Web开发,全局异常处理,配置文件的使用,及Logback&SLF4J的日志使用,国际化,最后,会结合Spring Boot及MyBatis进行产品的增删改查的案例讲解。 </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <b>核心篇</b><b>Spring Cloud</b><b>:</b>以案例的方式,结合Spring Boot,对注册中心,消费者与生产者,服务的调用OpenFeign,Hystrix监控,服务网关Gateway,消息驱动微服务Stream,分布式集群,及分布式配置中心进行讲解,使得我们快速掌握微服务的开发方法。 </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <b>核心篇</b><b>Spring Data</b><b>:</b>主要以案例的方式讲解持久层的开发,分别对Spring Data JPA 及 Spring Data JDBC 进行讲解。 </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> 
<b>学习收获:</b><b></b> </p> <p class="MsoNormal" align="left"> 1、使用 Spring MVC 快速开发; </p> <p class="MsoNormal" align="left"> 2、搞定 Spring Boot 核心的实现原理; </p> <p class="MsoNormal" align="left"> 3、通过 Spring Cloud 快速构建微服务架构; </p> <p class="MsoNormal" align="left"> 4、掌握 Spring 的使用和开发技巧。 </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <br> </p> <p class="MsoNormal" align="left"> <b>讲师介绍</b><b></b> </p> <p class="MsoNormal" align="left"> 杨春娟,05年毕业于清华大学,之后致力于 JavaEE 的设计开发,12年开始致力于 Hybris 的架构、设计及开发,及管理工作。精通C/ C++,Java, JavaEE,MySql,精通SAP Hybris等,善于将复杂的问题简单化并教授给学生,善于培养学生独立思考问题的能力。 </p> <p> <br> </p> <p> <img src="https://img-bss.csdn.net/202005180944265006.png" alt=""> </p> <p> <img src="https://img-bss.csdn.net/202005080326195251.png" alt=""> </p>

linux下利用/proc进行进程树的打印

在linux下利用c语言实现的进程树的打印,主要通过/proc下的目录中的进程文件,获取status中的进程信息内容,然后利用递归实现进程树的打印

相关热词 c#设计思想 c#正则表达式 转换 c#form复制 c#写web c# 柱形图 c# wcf 服务库 c#应用程序管理器 c#数组如何赋值给数组 c#序列化应用目的博客园 c# 设置当前标注样式
立即提问