A question about the Features -> Events example provided on the LiveCharts website?

LiveCharts sample code: https://lvcharts.net/App/examples/v1/wpf/Events

The code as I use it in my WPF project:

ScrollableViewModel.cs

using LiveCharts;
using LiveCharts.Defaults;
using LiveCharts.Geared;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Input;

namespace WpfApp1.Resources.UserControls.Scrollable
{
    public class ScrollableViewModel : INotifyPropertyChanged
    {
        private Func<double, string> _formatter;
        private Func<double, string> _xformatter;
        private double _from;
        private double _to;
        private double _min;
        private double _max;

        public ScrollableViewModel()
        {
            DataClickCommand = new MyCommand<ChartPoint>
            {
                ExecuteDelegate = p => From = Values.FirstOrDefault().DateTime.Ticks
            };

            var now = DateTime.Now;
            var trend = -30000d;
            var l = new List<DateTimePoint>();
            var Eventl = new List<VisitsByDateTime>();
            var r = new Random();

            for (var i = 0; i < 50000; i++)
            {
                now = now.AddMilliseconds(100);

                // one data point every 100 ms
                l.Add(new DateTimePoint(now, trend));

                // add one event marker per minute (every 600 points at 100 ms each)
                if (i % 600 == 0) Eventl.Add(new VisitsByDateTime { DateTime = now, Total = 1m });

                if (r.NextDouble() > 0.4)
                {
                    trend += r.NextDouble() * 10;
                }
                else
                {
                    trend -= r.NextDouble() * 10;
                }
            }

            Formatter = x => new DateTime((long)x).ToString("dd日HH时mm分ss秒fff毫秒");
            XFormatter = x => new DateTime((long)x).ToString("dd日HH时mm分");

            Values = l.AsGearedValues().WithQuality(Quality.High);
            EventValues = Eventl.AsGearedValues();

            From = DateTime.Now.AddMilliseconds(10000).Ticks;
            To = DateTime.Now.AddMilliseconds(900000).Ticks;
            Min = Values.FirstOrDefault().DateTime.Ticks;
            Max = Values.LastOrDefault().DateTime.Ticks;
        }

        public MyCommand<ChartPoint> DataClickCommand { get; set; }

        public object Mapper { get; set; }
        // waveform (line series)
        public GearedValues<DateTimePoint> Values { get; set; }
        // events (column series)
        public GearedValues<VisitsByDateTime> EventValues { get; set; }
        public double From
        {
            get { return _from; }
            set
            {
                _from = value;
                OnPropertyChanged("From");
            }
        }
        public double To
        {
            get { return _to; }
            set
            {
                _to = value;
                OnPropertyChanged("To");
            }
        }
        public double Min
        {
            get { return _min; }
            set
            {
                _min = value;
                OnPropertyChanged("Min");
            }
        }
        public double Max
        {
            get { return _max; }
            set
            {
                _max = value;
                OnPropertyChanged("Max");
            }
        }

        public Func<double, string> Formatter
        {
            get { return _formatter; }
            set
            {
                _formatter = value;
                OnPropertyChanged("Formatter");
            }
        }

        public Func<double, string> XFormatter
        {
            get { return _xformatter; }
            set
            {
                _xformatter = value;
                OnPropertyChanged("XFormatter");
            }
        }

        public event PropertyChangedEventHandler PropertyChanged;

        protected virtual void OnPropertyChanged(string propertyName = null)
        {
            if (PropertyChanged != null)
                PropertyChanged.Invoke(this, new PropertyChangedEventArgs(propertyName));
        }
    }

    public class MyCommand<T> : ICommand where T : class
    {
        public Predicate<T> CanExecuteDelegate { get; set; }
        public Action<T> ExecuteDelegate { get; set; }

        public bool CanExecute(object parameter)
        {
            return CanExecuteDelegate == null || CanExecuteDelegate((T)parameter);
        }

        public void Execute(object parameter)
        {
            if (ExecuteDelegate != null) ExecuteDelegate((T)parameter);
        }

        public event EventHandler CanExecuteChanged
        {
            add { CommandManager.RequerySuggested += value; }
            remove { CommandManager.RequerySuggested -= value; }
        }
    }
}
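
VisitsByDateTime and its LiveCharts mapper are not shown above. Assuming they follow the model from the linked Events example, they would look roughly like the sketch below (the class shape and the Charting.For registration are assumptions for illustration, not code copied from the project); without some such mapper LiveCharts would not know how to plot the EventValues column series at all:

using LiveCharts;
using LiveCharts.Configurations;
using System;

namespace WpfApp1.Resources.UserControls.Scrollable
{
    // Assumed shape of the event model bound to EventValues.
    public class VisitsByDateTime
    {
        public DateTime DateTime { get; set; }
        public decimal Total { get; set; }
    }

    public static class ChartingConfig
    {
        // Registers a global mapper so LiveCharts knows which members of
        // VisitsByDateTime supply the X and Y values. Call once at startup,
        // e.g. from App.OnStartup, before any chart is created.
        public static void Register()
        {
            var mapper = Mappers.Xy<VisitsByDateTime>()
                .X(v => v.DateTime.Ticks)   // same tick-based X axis as the DateTimePoint line series
                .Y(v => (double)v.Total);   // column height = event count
            Charting.For<VisitsByDateTime>(mapper);
        }
    }
}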

ScrollableViewE.xaml

<UserControl x:Class="WpfApp1.Resources.UserControls.ScrollableE.ScrollableViewE"
             xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
             xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
             xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" 
             xmlns:d="http://schemas.microsoft.com/expression/blend/2008" 
             xmlns:local="clr-namespace:WpfApp1.Resources.UserControls.ScrollableE"
             xmlns:lvc="clr-namespace:LiveCharts.Wpf;assembly=LiveCharts.Wpf"
             xmlns:geared="clr-namespace:LiveCharts.Geared;assembly=LiveCharts.Geared"
             xmlns:scrollable="clr-namespace:WpfApp1.Resources.UserControls.Scrollable"
             mc:Ignorable="d" 
             d:DesignHeight="450" d:DesignWidth="800">
    <UserControl.DataContext>
        <scrollable:ScrollableViewModel></scrollable:ScrollableViewModel>
    </UserControl.DataContext>
    <Grid>
        <lvc:CartesianChart Name="ScrollChart" 
                            DisableAnimations="True" 
                            ClipToBounds="True"
                            Zoom="X"
                            DataClick="ChartOnDataClick"
                            Margin="20 10">
            <lvc:CartesianChart.Resources>
                <Style TargetType="lvc:Separator">
                    <Setter Property="IsEnabled" Value="False"></Setter>
                </Style>
            </lvc:CartesianChart.Resources>
            <lvc:CartesianChart.Series>
                <geared:GColumnSeries Values="{Binding EventValues}" StrokeThickness="3"/>
                <geared:GLineSeries Values="{Binding Values}" 
                                    LineSmoothness="0"
                                    StrokeThickness="2" 
                                    Stroke="#00e701"
                                    PointGeometry="{x:Null}"
                                    AreaLimit="0"/>
            </lvc:CartesianChart.Series>
            <lvc:CartesianChart.AxisX>
                <lvc:Axis IsMerged="True" LabelFormatter="{Binding XFormatter, Mode=OneTime}"
                          MinValue="{Binding Min, Mode=TwoWay}"
                          MaxValue="{Binding Max, Mode=TwoWay}"
                          Foreground="#fff"
                          FontSize="12"
                          FontWeight="UltraBold"/>
            </lvc:CartesianChart.AxisX>
            <lvc:CartesianChart.AxisY>
                <lvc:Axis ShowLabels="False" />
            </lvc:CartesianChart.AxisY>
        </lvc:CartesianChart>
    </Grid>
</UserControl>
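
One detail worth double-checking: the view model exposes a DataClickCommand, but the XAML above never binds it, so only the ChartOnDataClick event handler is wired up. If the command route from the linked example is also wanted, the binding would presumably be added on the chart itself, roughly as below (this assumes the chart exposes a DataClickCommand property as in the official Events sample — verify against the linked page):

<lvc:CartesianChart Name="ScrollChart"
                    DisableAnimations="True"
                    Zoom="X"
                    DataClick="ChartOnDataClick"
                    DataClickCommand="{Binding DataClickCommand}">
    <!-- series and axes exactly as above -->
</lvc:CartesianChart>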

ScrollableViewE.cs

using LiveCharts;
using LiveCharts.Wpf;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;

namespace WpfApp1.Resources.UserControls.ScrollableE
{
    /// <summary>
    /// Interaction logic for ScrollableViewE.xaml
    /// </summary>
    public partial class ScrollableViewE : UserControl
    {
        public ScrollableViewE()
        {
            InitializeComponent();
        }
        private void ChartOnDataClick(object sender, ChartPoint p)
        {
            Console.WriteLine("[EVENT] You clicked (" + p.X + ", " + p.Y + ")");
        }
    }
}
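
A small aside for diagnosing the click: in a WPF application Console.WriteLine output is normally not visible because no console is attached, so a missing console line alone does not prove the handler was never called. A sketch of a more visible check, using System.Diagnostics.Debug.WriteLine (or simply a breakpoint) instead — shown here only as a debugging aid, not as a change to the chart logic:

private void ChartOnDataClick(object sender, ChartPoint p)
{
    // Debug.WriteLine appears in the Visual Studio Output window while debugging,
    // even though the WPF app has no console attached.
    System.Diagnostics.Debug.WriteLine("[EVENT] You clicked (" + p.X + ", " + p.Y + ")");
}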

Running screenshot: (image not included)

Question: when I click on the line series, execution does enter the ChartOnDataClick method, but why does nothing happen when I click on the column series? How can I make a click on the column series also reach ChartOnDataClick?
