Commit 8a73eb4b authored by Nitesh Jain

Imported Upstream version 1.0.1

/.bundle/
/.yardoc
/Gemfile.lock
/_yardoc/
/coverage/
/doc/
/pkg/
/spec/reports/
/tmp/
*.bundle
*.so
*.o
*.a
mkmf.log
source 'https://rubygems.org'
# Specify your gem's dependencies in model_tokenizer.gemspec
gemspec
gem "rails"
Copyright (c) 2014 Adib Saad
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ModelTokenizer
Generates random tokens by which models can be accessed. Instead of
```
somesite.com/video/71589
```
you'll get
```
somesite.com/video/j5-drkENpSDBNRds
```
## Installation
Add this line to your application's Gemfile:
```ruby
gem 'model_tokenizer'
```
And then execute:
```
$ bundle
```
Or install it yourself as:
```
$ gem install model_tokenizer
```
## Usage
1. Run
```
$ rails g model_tokenizer MODEL_NAME [field:type field:type ... ]
```
to create a new tokenized model. If the model already exists, ModelTokenizer will integrate into it by injecting the following code:
```ruby
extend ModelTokenizer
has_token
```
The appropriate migration will also be created, adding the `token` field and a unique index on it.
The default token length is 14, but you can change it (no lower than 8):
```ruby
has_token :length => 16
```
2. In the model file, make sure the following line is there:
```ruby
self.primary_key = :token
```
The generator injects this line automatically, but if you install ModelTokenizer by hand without using the generators, make sure it is present. A sketch of the resulting model and migration is shown below.
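For illustration, here is roughly what you end up with for a hypothetical `Video` model generated via `$ rails g model_tokenizer video title:string` (a sketch assembled from the generator templates bundled with the gem, not verbatim output; the model name, attribute, and file names are assumptions):
```ruby
# app/models/video.rb -- code injected by the generator
class Video < ActiveRecord::Base
  extend ModelTokenizer
  has_token #:length => 14
  self.primary_key = :token
end
```
```ruby
# db/migrate/xxx_model_tokenizer_create_videos.rb -- sketch of the generated migration
class ModelTokenizerCreateVideos < ActiveRecord::Migration
  def change
    create_table(:videos) do |t|
      t.string :title
      t.string :token, :null => false, :default => ""
      t.timestamps
    end
    add_index :videos, :token, unique: true
  end
end
```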
## Notes
ModelTokenizer generates tokens from the following charset:
```
a b c d e f g h i j k m n o p q r s t u v w x y z
A B C D E F G H J K L M N P R S T W X Y Z
2 3 4 5 6 7 8 9
- _
```
As you may have noticed, the following ambiguous characters have been removed:
* Lowercase: l
* Uppercase: I, O, Q, U, V
* Numerals: 1, 0
However, the gem doesn't check for awkward tokens that are confusing, have too many repeating characters or underscores/hyphens, or would otherwise make someone raise an eyebrow (e.g. DXMHMHLALAH, _-aj-a2j6f-qacins-). Additionally, ModelTokenizer doesn't detect when it has run out of combinations for generating new tokens, though this will be dealt with in the future.
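For a sense of scale, the size of the token keyspace can be estimated directly from the charset above. This quick check is not part of the gem; it is just arithmetic over the published 56-character set:
```ruby
# Tokens are built by sampling (with replacement) from the 56-character
# set shown above, so a length-n token has 56**n possible values.
CHARSET_SIZE = 56

[8, 14, 16].each do |length|
  puts "length #{length}: #{CHARSET_SIZE**length} possible tokens"
end
# length 8  => ~9.7 * 10^13
# length 14 => ~3.0 * 10^24
# length 16 => ~9.3 * 10^27
```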
ModelTokenizer has been tested with Rails 3 and 4.
## Contributing
1. Fork it ( https://github.com/adibsaad/model_tokenizer/fork )
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create a new Pull Request
require "bundler/gem_tasks"
require "rake/testtask"
desc 'Test model_tokenizer.'
Rake::TestTask.new do |t|
  t.libs << 'test'
  t.test_files = FileList['test/*_test.rb']
  t.verbose = true
end
desc "Run tests"
task :default => :test
require 'rails/generators/active_record'
require 'rails/generators/migration'

module ModelTokenizer
  module Generators
    class ModelTokenizerGenerator < ActiveRecord::Generators::Base
      include Rails::Generators::Migration

      argument :attributes, type: :array, default: [], banner: "field:type field:type"

      namespace "model_tokenizer"
      source_root File.expand_path("../templates", __FILE__)

      desc "Creates a model with the NAME argument. "\
           "If the model already exists, the appropriate code will be appended instead. "\
           "In either case, the appropriate migration will be created."

      def create_migration_file
        if (behavior == :invoke && model_exists?) || (behavior == :revoke && migration_exists?(table_name))
          migration_template "migration_existing.rb", "db/migrate/add_model_tokenizer_token_to_#{table_name}.rb"
        else
          migration_template "migration.rb", "db/migrate/model_tokenizer_create_#{table_name}.rb"
        end
      end

      def generate_model
        invoke "active_record:model", [name], migration: false unless model_exists? && behavior == :invoke
      end

      def inject_model_tokenizer_content
        content = model_contents

        class_path = if namespaced?
                       class_name.to_s.split("::")
                     else
                       [class_name]
                     end

        indent_depth = class_path.size - 1
        content = content.split("\n").map { |line| " " * indent_depth + line }.join("\n") << "\n"

        inject_into_class(model_path, class_path.last, content) if model_exists?
      end

      private

      def migration_data
        <<RUBY
      t.string :token, :null => false, :default => ""
RUBY
      end

      def model_contents
        <<-CONTENT
  extend ModelTokenizer
  has_token #:length => 14
  self.primary_key = :token
CONTENT
      end

      def model_exists?
        File.exists?(File.join(destination_root, model_path))
      end

      def migration_path
        @migration_path ||= File.join("db", "migrate")
      end

      def model_path
        @model_path ||= File.join("app", "models", "#{file_path}.rb")
      end

      def migration_exists?(table_name)
        Dir.glob("#{File.join(destination_root, migration_path)}/[0-9]*_*.rb").grep(/\d+_add_model_tokenizer_token_to_#{table_name}.rb$/).first
      end
    end
  end
end
class ModelTokenizerCreate<%= table_name.camelize %> < ActiveRecord::Migration
  def change
    create_table(:<%= table_name %>) do |t|
<% attributes.each do |attribute| -%>
      t.<%= attribute.type %> :<%= attribute.name %>
<% end -%>
<%= migration_data -%>
      t.timestamps
    end
    add_index :<%= table_name %>, :token, unique: true
  end
end
class AddModelTokenizerTokenTo<%= table_name.camelize %> < ActiveRecord::Migration
  def self.up
    change_table(:<%= table_name %>) do |t|
<% attributes.each do |attribute| -%>
      t.<%= attribute.type %> :<%= attribute.name %>
<% end -%>
<%= migration_data -%>
    end
    add_index :<%= table_name %>, :token, unique: true
  end

  def self.down
    # Implement rollback yourself.
    raise ActiveRecord::IrreversibleMigration
  end
end
require "model_tokenizer/version"
require "model_tokenizer/base"
module ModelTokenizer
def self.extended(base)
return if base.respond_to? :model_tokenizer
base.class_eval do
extend Base
before_create :generate_token
end
end
def self.included(base)
base.extend self
end
end
module ModelTokenizer
  module Base
    CHARSET = %w{
      a b c d e f g h i j k m n o p q r s t u v w x y z
      A B C D E F G H J K L M N P R S T W X Y Z
      2 3 4 5 6 7 8 9
      - _
    }

    # Default length is 14 characters. With the 56-character set above,
    # sampling with replacement gives 56**14 (roughly 3 * 10**24) possible tokens.
    @@model_tokenizer_token_length = 14

    def model_tokenizer_token_length
      @@model_tokenizer_token_length
    end

    def has_token(*attributes)
      options = {
        :length => @@model_tokenizer_token_length
      }.merge!(attributes.last.is_a?(Hash) ? attributes.pop : {})

      # Fall back to the default when the requested length is missing,
      # not an integer, or below the minimum of 8.
      if !options[:length].is_a?(Integer) || options[:length] < 8
        options[:length] = @@model_tokenizer_token_length
      end

      @@model_tokenizer_token_length = options[:length]

      include InstanceMethods
    end

    module InstanceMethods
      protected

      def generate_token
        # Keep sampling until the token doesn't collide with an existing record.
        self.token = loop do
          random_token = (0...self.class.model_tokenizer_token_length).map { CHARSET[rand(CHARSET.size)] }.join
          break random_token unless self.class.exists?(:token => random_token)
        end
      end
    end
  end
end
module ModelTokenizer
  VERSION = "1.0.1"
end
--- !ruby/object:Gem::Specification
name: model_tokenizer
version: !ruby/object:Gem::Version
  version: 1.0.1
platform: ruby
authors:
- Adib Saad
autorequire:
bindir: bin
cert_chain: []
date: 2014-11-14 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: bundler
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.7'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.7'
- !ruby/object:Gem::Dependency
  name: rake
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '10.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '10.0'
- !ruby/object:Gem::Dependency
  name: activerecord
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '4.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '4.0'
- !ruby/object:Gem::Dependency
  name: sqlite3
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: minitest
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
description: |-
  ModelTokenizer creates random tokens to be
  used as primary keys for ActiveRecord objects
email:
- adib.saad@gmail.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- ".gitignore"
- Gemfile
- LICENSE.txt
- README.md
- Rakefile
- lib/generators/model_tokenizer/model_tokenizer_generator.rb
- lib/generators/model_tokenizer/templates/migration.rb
- lib/generators/model_tokenizer/templates/migration_existing.rb
- lib/model_tokenizer.rb
- lib/model_tokenizer/base.rb
- lib/model_tokenizer/version.rb
- model_tokenizer.gemspec
- test/databases.yml
- test/helper.rb
- test/schema.rb
- test/token_generator_test.rb
homepage: https://github.com/adibsaad/model_tokenizer
licenses:
- MIT
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project:
rubygems_version: 2.2.2
signing_key:
specification_version: 4
summary: Random token generator for Rails models
test_files:
- test/databases.yml
- test/helper.rb
- test/schema.rb
- test/token_generator_test.rb
has_rdoc:
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'model_tokenizer/version'

Gem::Specification.new do |spec|
  spec.name     = "model_tokenizer"
  spec.version  = ModelTokenizer::VERSION
  spec.authors  = ["Adib Saad"]
  spec.email    = ["adib.saad@gmail.com"]
  spec.summary  = %q{Random token generator for Rails models}
  spec.homepage = "https://github.com/adibsaad/model_tokenizer"
  spec.license  = "MIT"
  spec.description =
    %q{ModelTokenizer creates random tokens to be
used as primary keys for ActiveRecord objects}

  spec.files         = `git ls-files`.split("\n")
  spec.executables   = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
  spec.require_paths = ["lib"]

  spec.add_development_dependency "bundler", "~> 1.7"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "activerecord", "~> 4.0"
  spec.add_development_dependency "sqlite3"
  spec.add_development_dependency "minitest"
end
sqlite3:
  adapter: sqlite3
  database: ":memory:"
  encoding: utf8
require "bundler/setup"
# if ENV['COVERAGE']
# require 'coveralls'
# Coveralls.wear!
# end
require "active_record"
# Put all your 'at_exit's that you want executed after
# running your tests BEFORE requiring minitest,
# as minitest uses at_exit to run the tests.
# Putting the following line after requiring minitest will
# close the database connection before the first test runs.
at_exit {ActiveRecord::Base.connection.disconnect!}
require "minitest/autorun"
Minitest::Test = MiniTest::Unit::TestCase unless defined?(Minitest::Test)
#require "mocha/setup"
# if ENV["COVERAGE"]
# require 'simplecov'
# SimpleCov.start do
# add_filter "test/"
# add_filter "friendly_id/migration"
# end
# end
# I18n.enforce_available_locales = false
require 'model_tokenizer'
# If you want to see the ActiveRecord log, invoke the tests using `rake test LOG=true`
# if ENV["LOG"]
# require "logger"
# ActiveRecord::Base.logger = Logger.new($stdout)
# end
module ModelTokenizer
  module Test
    def self.included(base)
      MiniTest::Unit.autorun
    end

    def transaction
      ActiveRecord::Base.transaction { yield; raise ActiveRecord::Rollback }
    end

    def with_instance_of(*args)
      model_class = args.shift
      args[0] ||= {:data => "some data goes here"}
      transaction { yield model_class.create!(*args) }
    end

    module Database
      extend self

      def connect
        version = ActiveRecord::VERSION::STRING
        driver = ModelTokenizer::Test::Database.driver
        engine = RUBY_ENGINE rescue "ruby"

        ActiveRecord::Base.establish_connection config[driver]

        message = "Using #{engine} #{RUBY_VERSION} AR #{version} with #{driver}"
        puts "-" * 72

        if in_memory?
          ActiveRecord::Migration.verbose = false
          Schema.migrate :up
          puts "#{message} (in-memory)"
        else
          puts message
        end
      end

      def config
        @config ||= YAML::load(File.open(File.expand_path("../databases.yml", __FILE__)))
      end

      def driver
        (ENV["DB"] or "sqlite3").downcase
      end

      def in_memory?
        config[driver]["database"] == ":memory:"
      end
    end
  end
end

class Module
  def test(name, &block)
    define_method("test_#{name.gsub(/[^a-z0-9']/i, "_")}".to_sym, &block)
  end
end
require "schema"
ModelTokenizer::Test::Database.connect
module ModelTokenizer
  module Test
    class Schema < ActiveRecord::Migration
      class << self
        def down
          drop_table :cars
        end

        def up
          # TODO: use schema version to avoid ugly hacks like this
          return if @done

          create_table :cars do |t|
            t.string :data
            t.string :token
          end

          create_table :trucks do |t|
            t.string :data
            t.string :token
          end

          @done = true
        end
      end
    end
  end
end
require "helper"
class Car < ActiveRecord::Base
extend ModelTokenizer
has_token
end
class Truck < ActiveRecord::Base
extend ModelTokenizer
has_token :length => 16
end
class TokenGenerator < MiniTest::Test
include ModelTokenizer::Test
def setup
Car.all.each(&:destroy)
Truck.all.each(&:destroy)
end
def test_that_tokens_are_created_for_models
with_instance_of(Car) do |record|
assert record.token, "Token is nil"
assert record.token.length == Car::model_tokenizer_token_length,
"Token length is not #{Car::model_tokenizer_token_length}"
record.token.split("").each do |c|
assert ModelTokenizer::Base::CHARSET.include?(c), "#{c} doesn't belong in the acceptable character set"
end
end
with_instance_of(Truck) do |record|
assert record.token, "Token is nil"
assert record.token.length == Truck::model_tokenizer_token_length,
"Token length is not #{Truck::model_tokenizer_token_length}"
record.token.split("").each do |c|
assert ModelTokenizer::Base::CHARSET.include?(c), "#{c} doesn't belong in the acceptable character set"
end
end
end
end