Annotated InfluxDB configuration file

### Welcome to the InfluxDB configuration file.

# The values in this file override the default values used by the system if
# a config option is not specified. The commented out lines are the configuration
# field and the default value used. Uncommenting a line and changing the value
# will change the value used at runtime when the process is restarted.

# If a line is left commented out, the value shown is the default used by the system.
# Uncomment a line and change its value, and the new value will take effect at runtime the next
# time the influxdb process is restarted.

# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
# The data includes a random ID, os, arch, version, the number of series and other
# usage data. No data from user databases is ever transmitted.
# Change this option to true to disable reporting.
# reporting-disabled = false

# Note that once InfluxDB is running, it reports usage data to usage.influxdata.com every 24 hours.
# The report includes a random ID, the OS, the architecture, the InfluxDB version and the number of series,
# but no data from user databases is ever transmitted. Uncomment reporting-disabled above and set it to true to disable this reporting.
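# For example, to opt out of usage reporting you would uncomment that key at the top level of the
# config file (commonly /etc/influxdb/influxdb.conf on packaged Linux installs; that path is an
# assumption, adjust it for your setup) and restart influxd. A minimal sketch:
#
#   reporting-disabled = true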


# Bind address to use for the RPC service for backup and restore.
# bind-address = "127.0.0.1:8088"

# The bind address of the RPC service used for backing up and restoring data.


###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###

# [meta] section
# The parameters below control the Raft consensus group that stores metadata about the InfluxDB cluster.


[meta]
  # Where the metadata/raft database is stored
  dir = "/var/lib/influxdb/meta"
  # The path where the metadata/raft database is stored, i.e. the meta directory.

  # Automatically create a default retention policy when creating a database.
  # retention-autocreate = true
  # Automatically create a default retention policy whenever a new database is created.

  # If log messages are printed for the meta service
  # logging-enabled = true
  # Whether log messages are printed for the meta service.

###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###

# [data] section
# The parameters here control where InfluxDB's actual shard data lives and how it is flushed from
# the WAL (Write Ahead Log). "dir" may need to be changed to a path that suits your system; in other
# words, roughly estimate how much disk space your data will need under your retention policy. The
# WAL settings are an advanced configuration, and the defaults should work for most use cases.

[data]
  # The directory where the TSM storage engine stores TSM files.
  dir = "/var/lib/influxdb/data"
  # The directory in which the TSM storage engine keeps its TSM files.

  # The directory where the TSM storage engine stores WAL files.
  wal-dir = "/var/lib/influxdb/wal"
  # The directory in which the TSM storage engine keeps its WAL files.

  # The amount of time that a write will wait before fsyncing.  A duration
  # greater than 0 can be used to batch up multiple fsync calls.  This is useful for slower
  # disks or when WAL write contention is seen.  A value of 0s fsyncs every write to the WAL.
  # Values in the range of 0-100ms are recommended for non-SSD disks.
  # wal-fsync-delay = "0s"
  # The amount of time a write waits before fsyncing (fsync flushes the file data held in memory out to disk).
  # A duration greater than 0 can be used to batch up multiple fsync calls; consider setting it when disks are
  # slow or WAL write contention is observed. For non-SSD disks, values in the range 0-100ms are recommended.


  # The type of shard index to use for new shards.  The default is an in-memory index that is
  # recreated at startup.  A value of "tsi1" will use a disk based index that supports higher
  # cardinality datasets.
  # index-version = "inmem"
  # The type of index used for newly created shards. The default is an in-memory index that is rebuilt at
  # startup; setting this to "tsi1" uses a disk-based index that copes better with high-cardinality data
  # (cardinality being, roughly, the number of distinct series formed by measurement, tag and retention-policy combinations).

  # Trace logging provides more verbose output around the tsm engine. Turning
  # this on can provide more useful output for debugging tsm engine issues.
  # trace-logging-enabled = false
  # Trace (debug) logging. Turning it on produces large amounts of verbose output about the inner workings of the TSM engine, which is useful when debugging TSM issues.


  # Whether queries should be logged before execution. Very useful for troubleshooting, but will
  # log any sensitive data contained within a query.
  # query-log-enabled = true
  # Whether queries are logged before being executed. Very useful for troubleshooting,
  # but any sensitive data contained in a query will end up in the log.



  # Settings for the TSM engine
  # The settings that follow are for the TSM storage engine.


  # CacheMaxMemorySize is the maximum size a shard's cache can
  # reach before it starts rejecting writes.
  # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
  # Values without a size suffix are in bytes.
  # cache-max-memory-size = "1g"
  # cache-max-memory-size is the maximum size a shard's in-memory cache may reach before that shard
  # starts rejecting writes (it is not the maximum size of the shard itself). Valid size suffixes are
  # k, m or g (case insensitive); a value without a suffix is in bytes.


  # CacheSnapshotMemorySize is the size at which the engine will
  # snapshot the cache and write it to a TSM file, freeing up memory
  # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
  # Values without a size suffix are in bytes.
  # cache-snapshot-memory-size = "25m"
  # cache-snapshot-memory-size is the cache size at which the engine snapshots the cache and writes it
  # out to a TSM file, freeing up memory. The size-suffix rules are the same as above.


  # CacheSnapshotWriteColdDuration is the length of time at
  # which the engine will snapshot the cache and write it to
  # a new TSM file if the shard hasn't received writes or deletes
  # cache-snapshot-write-cold-duration = "10m"
  # If a shard has not received writes or deletes for this long, the engine snapshots the cache and writes it to a new TSM file anyway.


  # CompactFullWriteColdDuration is the duration at which the engine
  # will compact all TSM files in a shard if it hasn't received a
  # write or delete
  # compact-full-write-cold-duration = "4h"
  # If a shard has not received a write or delete for this long, the engine compacts all TSM files in that shard.


  # The maximum number of concurrent full and level compactions that can run at one time.  A
  # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime.  Any number greater
  # than 0 limits compactions to that value.  This setting does not apply
  # to cache snapshotting.
  # max-concurrent-compactions = 0
  # The maximum number of full and level compactions that may run concurrently. A value of 0 means 50% of
  # runtime.GOMAXPROCS(0) is used at runtime (GOMAXPROCS is Go's limit on how many CPU cores the process may use);
  # any value greater than 0 caps concurrent compactions at that number. This setting does not apply to cache snapshotting.


  # The maximum series allowed per database before writes are dropped.  This limit can prevent
  # high cardinality issues at the database level.  This limit can be disabled by setting it to
  # 0.
  # max-series-per-database = 1000000
  # The maximum number of series allowed per database before writes are dropped. This guards against
  # high-cardinality problems at the database level (keeping too many series in memory can lead to OOM).
  # Setting it to 0 disables the limit.


  # The maximum number of tag values per tag that are allowed before writes are dropped.  This limit
  # can prevent high cardinality tag values from being written to a measurement.  This limit can be
  # disabled by setting it to 0.
  # max-values-per-tag = 100000
  # The maximum number of distinct values allowed per tag before writes are dropped. Like the setting above,
  # it prevents high-cardinality tag values from being written to a measurement; 0 disables the limit.
  # Before importing data into InfluxDB, roughly check that the allowed series and tag-value counts fit your needs.
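  # Putting the [data] settings above together: a hedged sketch of what a tuned section might look
  # like for a host with spinning disks and high series cardinality. The keys all come from this
  # file; the concrete values are illustrative assumptions, not recommendations.
  #
  #   [data]
  #     dir = "/var/lib/influxdb/data"
  #     wal-dir = "/var/lib/influxdb/wal"
  #     # batch fsync calls a little, since the WAL sits on a non-SSD disk
  #     wal-fsync-delay = "50ms"
  #     # disk-based index for high-cardinality series sets
  #     index-version = "tsi1"
  #     # allow a larger per-shard cache before writes are rejected
  #     cache-max-memory-size = "2g"
  #     cache-snapshot-memory-size = "50m"
  #     # cap background compactions so queries keep some CPU headroom
  #     max-concurrent-compactions = 2
  #     # guard rails against runaway cardinality
  #     max-series-per-database = 1000000
  #     max-values-per-tag = 100000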

###
### [coordinator]
###
### Controls the clustering service configuration.
###
# [coordinator] section: configuration of the clustering service.

[coordinator]
  # The default time a write request will wait until a "timeout" error is returned to the caller.
  # write-timeout = "10s"
  # The write timeout.

  # The maximum number of concurrent queries allowed to be executing at one time.  If a query is
  # executed and exceeds this limit, an error is returned to the caller.  This limit can be disabled
  # by setting it to 0.
  # max-concurrent-queries = 0
  # The maximum number of queries allowed to execute concurrently. Once the limit is reached, further
  # queries receive an error. Setting it to 0 disables the limit.

  # The maximum time a query is allowed to execute before being killed by the system.  This limit
  # can help prevent run away queries.  Setting the value to 0 disables the limit.
  # query-timeout = "0s"
  # The maximum time a query may run before the system kills it. This helps prevent runaway queries from
  # causing damage, e.g. an oversized query exhausting memory and crashing InfluxDB. 0 disables the limit.

  # The time threshold when a query will be logged as a slow query.  This limit can be set to help
  # discover slow or resource intensive queries.  Setting the value to 0 disables the slow query logging.
  # log-queries-after = "0s"
  # The time threshold after which a query is logged as a slow query. This helps you discover slow or resource-intensive queries. Setting it to 0 disables slow-query logging.

  # The maximum number of points a SELECT can process.  A value of 0 will make
  # the maximum point count unlimited.  This will only be checked every second so queries will not
  # be aborted immediately when hitting the limit.
  # max-select-point = 0
  # The maximum number of points a single SELECT can process; 0 means no limit, i.e. as many points as match the query are returned.


  # The maximum number of series a SELECT can run.  A value of 0 will make the maximum series
  # count unlimited.
  # max-select-series = 0
  # The maximum number of series a SELECT can run against; 0 means unlimited.

  # The maximum number of group by time buckets a SELECT can create.  A value of zero will make the maximum
  # number of buckets unlimited.
  # max-select-buckets = 0
  # The maximum number of GROUP BY time() buckets a SELECT can create; 0 means unlimited.
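  # As a worked example, a sketch of how the query guards above might be combined on a shared
  # instance. The durations and counts are assumptions chosen for illustration only.
  #
  #   [coordinator]
  #     write-timeout = "10s"
  #     # at most 20 queries in flight; further ones receive an error
  #     max-concurrent-queries = 20
  #     # kill anything that runs longer than five minutes
  #     query-timeout = "5m"
  #     # log queries slower than ten seconds so they can be investigated
  #     log-queries-after = "10s"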

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###
# [retention] section: enforcement of retention policies, i.e. eviction of old data.

[retention]
  # Determines whether retention policy enforcement is enabled.
  # enabled = true
  # Whether retention policy enforcement is enabled (default: enabled).

  # The interval of time when retention policy enforcement checks run.
  # check-interval = "30m"
  # How often the retention policy enforcement check runs, i.e. how often expired data is checked for and dropped.

###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data arrives.
### Only shards that, after creation, will have both a start- and end-time in the
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.
# [shard-precreation] section
# Controls the pre-creation of shards so that they are already available when data arrives. Only shards
# whose start and end times both lie in the future are ever created this way; shards that would be wholly
# or partially in the past are never pre-created.

[shard-precreation]
  # Determines whether shard pre-creation service is enabled.
  # enabled = true
  # Whether the shard pre-creation service is enabled (default: enabled).

  # The interval of time when the check to pre-create new shards runs.
  # check-interval = "10m"
  # How often the check that pre-creates new shards runs.

  # The default period ahead of the endtime of a shard group that its successor
  # group is created.
  # advance-period = "30m"
  # How far ahead of a shard group's end time its successor group is created (in plain terms: when the next shard group gets created).
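  # A minimal sketch of the two scheduling sections above with non-default intervals; the values
  # are illustrative assumptions only.
  #
  #   [retention]
  #     enabled = true
  #     check-interval = "30m"
  #
  #   [shard-precreation]
  #     enabled = true
  #     check-interval = "10m"
  #     # create the successor shard group an hour before the current one ends
  #     advance-period = "1h"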

###
### [monitor]
###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically if
### it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.
# [monitor] section: system self-monitoring, statistics and diagnostics.

[monitor]
  # Whether to record statistics internally.
  # store-enabled = true
  # Whether internal statistics are recorded.

  # The destination database for recorded statistics
  # store-database = "_internal"
  # The name of the database into which the recorded statistics are stored.

  # The interval at which to record statistics
  # store-interval = "10s"
  # The interval at which statistics are recorded.

###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
# [http] section: how the HTTP endpoints are configured. These are the primary mechanism for getting data into and out of InfluxDB.

[http]
  # Determines whether HTTP endpoint is enabled.
  # enabled = true
  # Whether the HTTP endpoint is enabled (default: enabled).

  # The bind address used by the HTTP service.
  # bind-address = ":8086"
  # The bind address used by the HTTP service; port 8086 by default.

  # Determines whether user authentication is enabled over HTTP/HTTPS.
  # auth-enabled = false
  # Whether user authentication is required over HTTP/HTTPS (default: disabled).

  # The default realm sent back when issuing a basic auth challenge.
  # realm = "InfluxDB"
  # The default realm sent back when issuing a basic-auth challenge, i.e. the realm value returned in the WWW-Authenticate header.

  # Determines whether HTTP request logging is enabled.
  # log-enabled = true
  # Whether HTTP requests are logged.

  # Determines whether detailed write logging is enabled.
  # write-tracing = false
  # Whether detailed write logging is enabled (default: disabled).

  # Determines whether the pprof endpoint is enabled.  This endpoint is used for
  # troubleshooting and monitoring.
  # pprof-enabled = true
  # Whether the pprof endpoint is enabled; it is used for troubleshooting and monitoring.

  # Determines whether HTTPS is enabled.
  # https-enabled = false
  # Whether HTTPS is enabled (default: disabled).

  # The SSL certificate to use when HTTPS is enabled.
  # https-certificate = "/etc/ssl/influxdb.pem"
  # The path of the SSL certificate to use when HTTPS is enabled.

  # Use a separate private key location.
  # https-private-key = ""
  # The location of a separate private-key file, for when the key is not bundled in the certificate file.

  # The JWT auth shared secret to validate requests using JSON web tokens.
  # shared-secret = ""
  # The shared secret used to validate requests authenticated with JWT (JSON Web Tokens).

  # The default chunk size for result sets that should be chunked.
  # max-row-limit = 0
  # The default chunk size for result sets that should be chunked.

  # The maximum number of HTTP connections that may be open at once.  New connections that
  # would exceed this limit are dropped.  Setting this value to 0 disables the limit.
  # max-connection-limit = 0
  # The maximum number of HTTP connections that may be open at once; new connections beyond the limit are dropped. 0 disables the limit.

  # Enable http service over unix domain socket
  # unix-socket-enabled = false
  # Whether the HTTP service is also exposed over a unix domain socket (default: disabled).

  # The path of the unix domain socket.
  # bind-socket = "/var/run/influxdb.sock"
  # The path of the unix domain socket.

  # The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
  # max-body-size = 25000000
  # The maximum size of a client request body, in bytes. Setting it to 0 disables the limit.
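  # As an example of a hardened HTTP endpoint, the settings above might be combined as follows.
  # The certificate path and the limits are assumed values for illustration; enabling auth also
  # requires that users have been created in InfluxDB.
  #
  #   [http]
  #     enabled = true
  #     bind-address = ":8086"
  #     # require username/password on every request
  #     auth-enabled = true
  #     # serve the API over TLS
  #     https-enabled = true
  #     https-certificate = "/etc/ssl/influxdb.pem"
  #     # cap request bodies at ~25 MB and open connections at 500
  #     max-body-size = 25000000
  #     max-connection-limit = 500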


###
### [ifql]
###
### Configures the ifql RPC API.
###
# [ifql] section: configuration of the ifql RPC API.

[ifql]
  # Determines whether the RPC service is enabled.
  # enabled = true
  # Whether the ifql RPC service is enabled.

  # Determines whether additional logging is enabled.
  # log-enabled = true
  # Whether additional logging for this service is enabled.

  # The bind address used by the ifql RPC service.
  # bind-address = ":8082"
  # The bind address used by the ifql RPC service.


###
### [subscriber]
###
### Controls the subscriptions, which can be used to fork a copy of all data
### received by the InfluxDB host.
###
# [subscriber] section: controls subscriptions, which can be used to fork a copy of all data received by the InfluxDB host.

[subscriber]
  # Determines whether the subscriber service is enabled.
  # enabled = true
  # Whether the subscriber service is enabled.

  # The default timeout for HTTP writes to subscribers.
  # http-timeout = "30s"
  # The default timeout for HTTP writes to subscribers.

  # Allows insecure HTTPS connections to subscribers.  This is useful when testing with self-
  # signed certificates.
  # insecure-skip-verify = false
  # Whether insecure HTTPS connections to subscribers are allowed; useful when testing with self-signed certificates.

  # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
  # ca-certs = ""
  # The path to the PEM-encoded CA certs file; if empty, the default system certs are used.

  # The number of writer goroutines processing the write channel.
  # write-concurrency = 40
  # The number of writer goroutines that process the write channel.

  # The number of in-flight writes buffered in the write channel.
  # write-buffer-size = 1000
  # The number of in-flight writes buffered in the write channel.
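  # A small sketch for the subscriber settings above, e.g. when a downstream subscriber uses a
  # self-signed certificate in a test environment (values are illustrative assumptions):
  #
  #   [subscriber]
  #     enabled = true
  #     http-timeout = "30s"
  #     # accept the subscriber's self-signed certificate (test environments only)
  #     insecure-skip-verify = true
  #     write-concurrency = 40
  #     write-buffer-size = 1000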


###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###
# [[graphite]] section: one or more listeners for Graphite data.

[[graphite]]
  # Determines whether the graphite endpoint is enabled.
  # Whether the graphite endpoint is enabled.
  # enabled = false
  # database = "graphite"
  # Target database name.
  # retention-policy = ""
  # Retention policy to write into.
  # bind-address = ":2003"
  # Bind address.
  # protocol = "tcp"
  # Listening protocol.
  # consistency-level = "one"
  # Write consistency level.

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.
  # The next lines control how batching works. You should keep batching enabled, otherwise you may drop
  # metrics or see poor performance. When many points arrive at once, batching buffers them in memory.

  # Flush if this many points get buffered
  # batch-size = 5000
  # Flush once this many points have been buffered.

  # number of batches that may be pending in memory
  # batch-pending = 10
  # The number of batches that may be pending in memory.

  # Flush at least this often even if we haven't hit buffer limit
  # batch-timeout = "1s"
  # Flush at least this often, even if the buffer limit has not been reached.

  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
  # udp-read-buffer = 0
  # UDP read buffer size; 0 means the OS default. The UDP listener will fail if this is set above the OS maximum.

  ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
  # separator = "."
  # This string joins multiple matching 'measurement' values, giving more control over the final measurement name.

  ### Default tags that will be added to all metrics.  These can be overridden at the template level
  ### or by tags extracted from metric
  # tags = ["region=us-east", "zone=1c"]
  # Default tags that are added to all metrics. They can be overridden at the template level or by tags extracted from the metric.

  ### Each template line requires a template pattern.  It can have an optional
  ### filter before the template and separated by spaces.  It can also have optional extra
  ### tags following the template.  Multiple tags should be separated by commas and no spaces
  ### similar to the line protocol format.  There can be only one default template.
  # Each template line requires a template pattern. An optional filter may precede the pattern, separated
  # by spaces, and optional extra tags may follow it. Multiple tags are separated by commas with no spaces,
  # just like the line protocol format. There can be only one default template.

  # templates = [
  #   "*.app env.service.resource.measurement",
  #   # Default template
  #   "server.*",
  # ]
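  # Tying the graphite options together, a hedged sketch of one enabled listener with a template.
  # The metric naming scheme ("servers.<host>.<metric...>"), the static tag and the values are
  # assumptions made up for the example; check the graphite service documentation for the exact
  # template semantics.
  #
  #   [[graphite]]
  #     enabled = true
  #     database = "graphite"
  #     bind-address = ":2003"
  #     protocol = "tcp"
  #     batch-size = 5000
  #     batch-pending = 10
  #     batch-timeout = "1s"
  #     # e.g. "servers.host01.cpu.load" -> measurement "cpu.load" (nodes joined by the separator),
  #     # tag host=host01, plus a static tag source=graphite
  #     templates = [
  #       "servers.* .host.measurement* source=graphite",
  #     ]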

###
### [collectd]
###
### Controls one or many listeners for collectd data.
###
# [[collectd]] section: one or more listeners for collectd data.

[[collectd]]
  # enabled = false
  # bind-address = ":25826"
  # database = "collectd"
  # retention-policy = ""
  #
  # The collectd service supports either scanning a directory for multiple types
  # db files, or specifying a single db file.
  # That is, collectd can either scan a directory containing multiple types.db files, or use a single specified db file.
  # typesdb = "/usr/local/share/collectd"
  #
  # security-level = "none"
  # Security level.
  # auth-file = "/etc/collectd/auth_file"
  # Path to the auth file.

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.
  # The lines below are the same batching settings described for graphite above.

  # Flush if this many points get buffered
  # batch-size = 5000

  # Number of batches that may be pending in memory
  # batch-pending = 10

  # Flush at least this often even if we haven't hit buffer limit
  # batch-timeout = "10s"

  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
  # read-buffer = 0

  # Multi-value plugins can be handled two ways.
  # "split" will parse and store the multi-value plugin data into separate measurements
  # "join" will parse and store the multi-value plugin as a single multi-value measurement.
  # "split" is the default behavior, for backward compatibility with previous versions of influxdb.
  # Multi-value plugins can be handled in two ways:
  # "split" parses and stores the multi-value plugin data in separate measurements,
  # while "join" parses and stores it as a single multi-value measurement.
  # parse-multivalue-plugin = "split"
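  # Similarly, a minimal collectd listener sketch, assuming the stock types.db location shown
  # above; "join" is chosen only to illustrate the multi-value option.
  #
  #   [[collectd]]
  #     enabled = true
  #     bind-address = ":25826"
  #     database = "collectd"
  #     typesdb = "/usr/local/share/collectd"
  #     security-level = "none"
  #     # store multi-value plugin data as a single multi-value measurement
  #     parse-multivalue-plugin = "join"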

###
### [opentsdb]
###
### Controls one or many listeners for OpenTSDB data.
###
# [[opentsdb]] section: one or more listeners for OpenTSDB data.

[[opentsdb]]
  # enabled = false
  # bind-address = ":4242"
  # database = "opentsdb"
  # retention-policy = ""
  # consistency-level = "one"
  # Write consistency level.
  # tls-enabled = false
  # Whether TLS is enabled.
  # certificate= "/etc/ssl/influxdb.pem"
  # Path to the TLS certificate.

  # Log an error for every malformed point.
  # log-point-errors = true
  # Log an error for every malformed point received.

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Only points
  # metrics received over the telnet protocol undergo batching.
  # Batching settings, as for graphite above; note that only points received over the telnet protocol are batched.

  # Flush if this many points get buffered
  # batch-size = 1000

  # Number of batches that may be pending in memory
  # batch-pending = 5

  # Flush at least this often even if we haven't hit buffer limit
  # batch-timeout = "1s"

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###
# [[udp]] section: listeners for InfluxDB line protocol data arriving over UDP.

[[udp]]
  # enabled = false
  # bind-address = ":8089"
  # database = "udp"
  # retention-policy = ""

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.

  # Flush if this many points get buffered
  # batch-size = 5000

  # Number of batches that may be pending in memory
  # batch-pending = 10

  # Will flush at least this often even if we haven't hit buffer limit
  # batch-timeout = "1s"

  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
  # read-buffer = 0
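  # And a sketch of a UDP line-protocol listener with batching; the values are illustrative and the
  # database name "udp" is simply the default shown above.
  #
  #   [[udp]]
  #     enabled = true
  #     bind-address = ":8089"
  #     database = "udp"
  #     batch-size = 5000
  #     batch-pending = 10
  #     batch-timeout = "1s"
  #     # bump the socket buffer for bursty senders; 0 keeps the OS default
  #     read-buffer = 0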

###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###
# [continuous_queries] section: how continuous queries (CQs) are run within InfluxDB.

[continuous_queries]
  # Determines whether the continuous query service is enabled.
  # Whether the continuous query service is enabled.
  # enabled = true

  # Controls whether queries are logged when executed by the CQ service.
  # log-enabled = true
  # Whether queries executed by the CQ service are logged.

  # Controls whether queries are logged to the self-monitoring data store.
  # query-stats-enabled = false
  # Whether query statistics are logged to the self-monitoring data store.

  # interval for how often continuous queries will be checked if they need to run
  # run-interval = "1s"
  # How often continuous queries are checked to see whether they need to run.
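  # Finally, a small sketch of the CQ section with query statistics turned on. The values are
  # illustrative; as a rule of thumb, run-interval should be no larger than the shortest
  # GROUP BY time() interval used by your continuous queries.
  #
  #   [continuous_queries]
  #     enabled = true
  #     log-enabled = true
  #     # also record CQ execution statistics in the self-monitoring store
  #     query-stats-enabled = true
  #     run-interval = "1s"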

glog 入门简介 - foreveryl - 博客园     foreveryl    glog 入门简介    Glog的简单入门,glog虽然在配置参数方面比较麻烦,但是在小规模程序中,由于其简单灵活,也许会有优势。         0,  glog 是google的开源日志系统,相比较log4系列的日志系统,它更加轻巧灵活,而且功能也比较完善。 结...